import sys
import re
import os
import csv
import numpy as np
from operator import itemgetter
from time import time
from extract_toc import parseargs
from predict_using_toc_mapper import Mapper, get_topic, read_model_file
from find_topics import toc_entries, get_summary_map, read_topics
import dateparser
import check_309
import parse_name
from get_ratings import Ratings, Ratings2
import delimiterwriter
from predict_using_subtopic_mapper import SubtopicPredictor
from analyze_topic import SubtopicReader
from headerutil import *
NEWLINE_WITHIN_COLUMN = '\r\n'
CSV_LINE_TERMINATOR = '\r\n'
CSV_FIELD_DELIMITER = ','
FD_REPLACED = None
p_region = re.compile(r'(^|.*\s+)region(\s*[:\']\s*|\s+)(.*)?\s*$', re.IGNORECASE)
p_region_with_other = re.compile(r'(.*)?\s{5,}(certificate\s+num[bh]e[ir]|certificate|charter\s+num[bh]er|charter|field\s+offic\s*e|url)\s*:?\s*(.*)?\s*$', re.IGNORECASE)
p_blank = re.compile(r'^\s*$')
p_from_first_uppercase_char = re.compile(r'^.*?([A-Z].*)$', re.MULTILINE)
p_cert_direct = re.compile(r'(^|^.*\s+)(certificate\s+number)(\s*:\s*|\s+)(\w+).*$', re.IGNORECASE)
p_region_direct = re.compile(r'(^|^.*\s+)(region)(\s*:\s*|\s+)(\w+).*$', re.IGNORECASE)
p_patterns_str = {
    'bank_name' : [
        r'bank\s+name',
        r'institution\s+name',
        'name'
    ],
    'bank_location': [
        'location'
    ],
    'examiner_in_charge': [
        r'examiner[\s\-]*in[\s\-]*charge'
    ],
    'exam_start_date': [
        r'examination[\s\-]*start[\s\-]*date'
    ],
    'exam_date': [
        r'examination[\s\-]*date'
    ],
    'exam_as_of_date': [
        r'examination[\s\-]*as[\s\-]*of[\s\-]*date'
    ]
}
all_matched = {}
for k,patterns in p_patterns_str.items():
all_matched[k] = []
p_patterns = {}
for k,patterns in p_patterns_str.items():
    p_patterns[k] = [re.compile(r'(^|.*\s+)' + p + r'(\s*[:\'][\'\s\.]*|[\'\s\.]+)' + r'(.*)?\s*$', re.IGNORECASE) for p in patterns]
def get_pattern(line, pat):
ret = []
for i, p in enumerate(p_patterns[pat]):
quality = 0
m = p.match(line)
if (m):
st = m.group(1)
sep = m.group(2)
val = m.group(3)
            vals = re.split(r'\s{5,}', val)
val = vals[0]
if (not st.strip()):
quality += 1 ## Higher quality for line starting pattern
else:
if (len(st) - len(st.rstrip()) < 2): ## Just one space
# quality -= 1
quality -= 0 ## Ignore this one for now
            if (sep.strip() == ':'):
                quality += 1 ## Higher quality in presence of ':'
if (len(vals) == 1):
quality += 1
ret.append((p_patterns_str[pat][i], val, quality))
return ret
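## Illustrative example (not part of the original source): for the line
## "Bank Name: First National Bank", get_pattern(line, 'bank_name') returns
## [('bank\s+name', 'First National Bank', 3), ('name', 'First National Bank', 2)]
## -- the first match scores +1 for starting the line, +1 for the ':' separator,
## and +1 because the value is not followed by a second wide-spaced column.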
def match_pattern(line, pat):
global all_matched
all_matched[pat] += get_pattern(line, pat)
def match_all_patterns(line):
for pat in p_patterns.keys():
match_pattern(line, pat)
"""
def best_match(pat):
# print('In best match', pat)
all_m = all_matched.get(pat)
if (all_m):
l = sorted(all_matched.get(pat), key=lambda x: (-1 * p_patterns_str[pat].index(x[0]), x[2]), reverse=True)
# print('Best match sorted list', l)
if (l):
if (l[0][2] > 0): ## Quality more than zero
return l[0]
"""
def best_match(pat, validationfn = None):
# print('In best match', pat)
all_m = all_matched.get(pat)
if (all_m):
l = sorted(all_matched.get(pat), key=lambda x: (-1 * p_patterns_str[pat].index(x[0]), x[2]), reverse=True)
# print('Best match sorted list', l)
if (l):
if (validationfn):
for item in l:
                    if (item[2] >= 0 and validationfn(item[1])): ## Quality zero or higher and value passes validation
return item
else:
                if (l[0][2] >= 0): ## Quality zero or higher
return l[0]
"""
def best_match_text(pat):
bm_tuple = best_match(pat)
if (bm_tuple and len(bm_tuple) == 3):
return bm_tuple[1]
else:
return ""
"""
def best_match_text(pat, validationfn = None):
bm_tuple = best_match(pat, validationfn)
if (bm_tuple and len(bm_tuple) == 3):
return bm_tuple[1]
else:
return ""
def format_eic(eic_name):
if (not eic_name):
return eic_name
    words = re.split(r'\s+', eic_name)
new_eic = []
for i, word in enumerate(words):
if word.endswith(';'):
new_eic += [word[:-1]]
break
if word[0].islower():
break
new_eic += [word]
return ' '.join(new_eic)
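## Illustrative example (not part of the original source):
## format_eic("John A. Smith; Commissioned Examiner") -> "John A. Smith"
## (the name is truncated at the first word that ends in ';' or starts lowercase).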
def format_date(dt):
if (not dt):
return dt
    parts = re.split(r'\s{3,}', dt)
if (len(parts) >= 1):
return parts[0]
else:
return ""
def get_cert_from_line(line):
if not line:
return "";
m = p_cert_direct.match(line)
if m:
return m.group(4)
else:
return ""
def readfile(filename):
for line in open(filename, 'r', encoding='latin1'):
yield(line[:-1])
def remove_punct(str):
return re.sub(r'[^\w\s]','',str).strip()
def format_cert_number(cert):
return remove_punct(cert)
def format_region(region):
if not region:
return region
    split_by_extra_spaces = re.split(r'\s{5,}', remove_punct(region))
return split_by_extra_spaces[0]
def format_bank_str(str):
if not str:
return str
m = p_from_first_uppercase_char.match(str.strip())
if (m):
return singlespace(m.group(1))
else:
return singlespace(str)
def singlespace(sent):
    return ' '.join(re.split(r'\s+', sent))
def separate_cert(str):
cert = ""
newstr = str
m_str_with_other = p_region_with_other.match(str)
if (m_str_with_other):
newstr = m_str_with_other.group(1)
if m_str_with_other.group(2).lower() == "charter" and m_str_with_other.group(3).lower() == "bank":
return str, cert
if (m_str_with_other.group(2).strip().lower().startswith('certificate')):
cert = m_str_with_other.group(3)
return newstr, cert
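## Illustrative example (not part of the original source):
## separate_cert("First National Bank          Certificate Number: 12345")
## pulls "12345" out as the certificate and returns the remaining bank text
## (possibly with trailing spaces, which the format_* helpers strip later).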
def prev_nonblank_line(lines, lineno):
# print('In prev_nonblank_line', lineno, len(lines))
while lineno > 0:
lineno -= 1
line = lines[lineno]
if (p_blank.match(line)):
continue
else:
# print("Non Blank Line = ", line)
return lineno, line
return -1, ""
def init_all_matched():
for k,patterns in p_patterns_str.items():
all_matched[k] = []
def get_header_for_file(filename):
# global all_matched
init_all_matched()
region = ""
cert = ""
bank_name = ""
bank_location = ""
lines = []
lineno = 0
ff = chr(12)
prev_region_match_quality = 0
for line in readfile(filename):
# if line and ord(line[0]) == 12: ## Line starts with control-L
# line = line[1:]
if line: ## Delete all form feed characters
line = line.replace(ff, "")
# line = re.sub('\s+', ' ', line) ## Compress multile spaces to a single space character
lines += [line]
match_all_patterns(line)
m_region = p_region.match(line)
if (m_region):
if (m_region.group(1).strip() == ""):
if (m_region.group(2).strip() == ":"):
region_match_quality = 3
else:
region_match_quality = 2
else:
region_match_quality = 1
if (region_match_quality >= prev_region_match_quality):
prev_region_match_quality = region_match_quality
region = m_region.group(3)
# print("Region = ", region)
region, certx = separate_cert(region)
if (not cert):
cert = certx
# print("Evaluating previous lines:", line)
if (m_region.group(1).strip() == ""):
location_line, bank_location = prev_nonblank_line(lines, lineno)
bank_location, cert2 = separate_cert(bank_location)
if (not cert):
cert = cert2
bank_line, bank_name = prev_nonblank_line(lines, location_line)
# print("Bank Name = ", bank_name)
bank_name, cert2 = separate_cert(bank_name)
if (not cert):
cert = cert2
# print("Bank Name = ", bank_name)
if (not cert):
cert = get_cert_from_line(line)
lineno += 1
# print(all_matched)
if (not bank_name):
bank_name = best_match_text('bank_name')
if (not bank_location):
bank_location = best_match_text('bank_location')
examiner_in_charge = format_eic(best_match_text('examiner_in_charge'))
eic_first_name, eic_middle_name, eic_last_name, eic_suffix = parse_name.parse_name(examiner_in_charge)
exam_start_date = format_date(best_match_text('exam_start_date', dateparser.get_date))
if (not exam_start_date):
exam_start_date = format_date(best_match_text('exam_date', dateparser.get_date))
exam_start_year, exam_start_month, exam_start_day, exam_start_date_formatted = dateparser.get_year_month_day(exam_start_date)
exam_as_of_date = format_date(best_match_text('exam_as_of_date', dateparser.get_date))
exam_as_of_year, exam_as_of_month, exam_as_of_day, exam_as_of_date_formatted = dateparser.get_year_month_day(exam_as_of_date)
return (lines,
format_region(region).title().replace(' ', '_'),
format_cert_number(cert),
format_bank_str(bank_name).replace(' ', '_'),
format_bank_str(bank_location),
eic_first_name,
eic_middle_name,
eic_last_name,
exam_start_date_formatted,
exam_start_year, exam_start_month,
exam_as_of_date_formatted,
exam_as_of_year, exam_as_of_month)
def multiply_array(arr, factor):
if (factor > 1):
subscript = True
else:
subscript = False
if subscript:
return [newval + '_' + str(i+1) for subarr in [[val] * factor for val in arr] for i,newval in enumerate(subarr)]
else:
return [newval for subarr in [[val] * factor for val in arr] for i,newval in enumerate(subarr)]
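## Illustrative example (not part of the original source):
## multiply_array(['Loans', 'Capital'], 2) -> ['Loans_1', 'Loans_2', 'Capital_1', 'Capital_2']
## With factor 1 the names are returned without subscripts.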
def format_headercol(header):
    header_new = re.sub(r'[\-\/]', ' ', header)
header_new = re.sub('\'', '', header_new)
header_new = re.sub(' +', '_', header_new)
return header_new
def replace_list(orig, start, end, new):
orig[start:end] = new
def find_in_list(lst, pattern):
p = re.compile("^" + pattern + "_?[0-9]*$", re.IGNORECASE)
inds = [i for i,val in enumerate(lst) if p.match(val)]
if (inds and len(inds) > 0):
return (inds[0], inds[-1] + 1)
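## Illustrative example (not part of the original source):
## find_in_list(['a', 'Topic_1', 'Topic_2', 'b'], 'Topic') -> (1, 3),
## a half-open [start, end) range; the function returns None when nothing matches.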
def get_headers_for_files(files, topics, mapper, summary_map, outfile, exfile, nosplit, topic_split_times, ratings, smodels = None, stopics = None):
if (not outfile):
if (len(CSV_FIELD_DELIMITER) == 1):
writer = csv.writer(sys.stdout, delimiter = CSV_FIELD_DELIMITER, lineterminator = CSV_LINE_TERMINATOR)
else:
writer = delimiterwriter.writer(sys.stdout, CSV_FIELD_DELIMITER, CSV_LINE_TERMINATOR, FD_REPLACED)
else:
outf = open(outfile, 'w')
if (len(CSV_FIELD_DELIMITER) == 1):
writer = csv.writer(outf, delimiter = CSV_FIELD_DELIMITER, lineterminator = CSV_LINE_TERMINATOR)
else:
writer = delimiterwriter.writer(outf, CSV_FIELD_DELIMITER, CSV_LINE_TERMINATOR, FD_REPLACED)
exf = open(exfile, 'w')
headerline = [
'serial_no',
'file_name',
'region',
'certificate_number',
'bank_name',
'bank_location',
'examiner_in_charge_first_name',
'examiner_in_charge_middle_name',
'examiner_in_charge_last_name',
'exam_start_date',
'exam_start_year',
'exam_start_month',
'exam_as_of_date',
'exam_as_of_year',
'exam_as_of_month'
]
headerline += ratings.get_column_headers()
topic_start_index = len(headerline)
# topic_split_times = 4
topics.append('Confidential')
if (nosplit):
headerline += topics
else:
headerline += multiply_array(topics, topic_split_times)
# headerline_no_spaces = [headercol.replace(' ', '_') for headercol in headerline]
smodelfile=None
stopic=None
if (smodels and len(smodels) > 0):
smodelfile= smodels[0]
if (stopics and len(stopics) > 0):
stopic= stopics[0]
# smodelfile = "model_sub_svc.pkl"
# stopic = "IT Assessment"
# print("smodelfile = %s, stopic = %s" % (smodelfile, stopic))
if smodelfile:
subtopicReader = SubtopicReader(stopic, mapper, summary_map)
subtopicPredictor = SubtopicPredictor(smodelfile)
subtopic_columns = subtopicReader.get_column_names(subtopicPredictor)
stcol_start, stcol_end = find_in_list(headerline, stopic)
headerline[stcol_start:stcol_end] = subtopic_columns
headerline_no_spaces = [format_headercol(headercol) for headercol in headerline]
writer.writerow(headerline_no_spaces)
serial=0
start_time = time()
for filename in files:
file_time = time()
# print("Processing file %s at %f" % (filename, (file_time - start_time)))
filedata = get_header_for_file(filename)
file_time = time()
# print("Processing file %s at %f" % (filename, (file_time - start_time)))
rowdata = [os.path.basename(filename)]
lines = filedata[0]
## Only write to exception file if part 309 is not present
# if not check_309.has_part_309_sents(lines):
# exf.write(os.path.abspath(filename) + '\r\n')
# continue
# print(summary_map)
no_topics = summary_map.get(os.path.basename(filename))
# print("No Topics", no_topics)
guess_allowed = False
if (not no_topics or no_topics == 0):
guess_allowed = True
topic_list = read_topics(filename, mapper, guess_allowed)
# print(topic_list)
if nosplit:
topic_data = ["" for i in range(len(topics))]
else:
topic_data = ["" for i in range(len(topics) * topic_split_times)]
# print("Topic Data length", len(topic_data))
no_topics_in_doc = len(topic_list)
if smodelfile:
stcol_topic_start, stcol_topic_end = find_in_list(topics, stopic)
if (not nosplit):
## Readjust for split columns
stcol_topic_start = stcol_topic_start * topic_split_times
stcol_topic_end = stcol_topic_start + topic_split_times
stopic_start_line, stopic_end_line = None,None
for i, topic_line in enumerate(topic_list):
start_line = topic_line[0]
if (i < no_topics_in_doc -1):
end_line = topic_list[i+1][0]
if nosplit:
topic_text = NEWLINE_WITHIN_COLUMN.join(lines[start_line:end_line])
else:
topic_texts_split = break_into_pieces(lines[start_line:end_line], NEWLINE_WITHIN_COLUMN)
else:
end_line = None
if nosplit:
topic_text = NEWLINE_WITHIN_COLUMN.join(lines[start_line:])
else:
topic_texts_split = break_into_pieces(lines[start_line:], NEWLINE_WITHIN_COLUMN)
topic_name = topic_line[2]
topic_index = topics.index(topic_name)
if nosplit:
topic_data[topic_index] = topic_text
# topic_data[topic_index] = topic_text[:32000]
else:
if (len(topic_texts_split) > topic_split_times):
print('Problem in file %s for topic %s, number of splits needed is %d' % (os.path.basename(filename), topic_name, len(topic_texts_split)))
for topic_subindex in range(len(topic_texts_split)):
# print('Setting:', topic_index, topic_index * topic_split_times + topic_subindex)
topic_data[topic_index * topic_split_times + topic_subindex] = topic_texts_split[topic_subindex]
## Handle Subtopics
if smodelfile and topic_name == stopic:
stopic_start_line, stopic_end_line = start_line, end_line
# topic_data[topic_index] = topic_text[:300]
# if (len(topic_text) > 32000):
# print(rowdata[0], topic_name, topic_index, len(topic_text))
# if (len(topic_text) > 32000):
# print(topic_text)
# print(topic_name, topic_index)
# print('======================================================')
# print(topic_text)
if smodelfile:
            ## If subtopic lines exist
if stopic_start_line:
subtopics_dict = subtopicReader.mapped_subtopics_from_lines(lines, stopic_start_line, stopic_end_line, subtopicPredictor)
subtopics_arr = subtopicReader.subtopic_array(subtopics_dict)
topic_data[stcol_topic_start:stcol_topic_end] = subtopics_arr
else:
topic_data[stcol_topic_start:stcol_topic_end] = subtopicReader.empty_subtopics(subtopicPredictor)
serial += 1
rowdata.insert(0, serial)
rowdata += filedata[1:]
ratings.process_file(filename)
ratings.map_ratings()
rowdata += ratings.get_column_data()
rowdata += topic_data
writer.writerow(rowdata)
if (outfile):
outf.close()
if (exf):
exf.close()
def break_into_pieces(lines, newlinechar, chunksize=32000):
fields = []
field = ""
fieldlen = 0
newlinelen = len(newlinechar)
for i, line in enumerate(lines):
if (fieldlen + len(line) + newlinelen) > chunksize:
fields.append(field)
field = ""
fieldlen = 0
if (field):
field += newlinechar
field += line
fieldlen = fieldlen + len(line) + newlinelen
if field:
fields.append(field)
return fields
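## Illustrative example (not part of the original source), with chunksize
## shrunk to make the split visible:
## break_into_pieces(['aaa', 'bbb', 'ccc'], '\n', chunksize=8) -> ['aaa\nbbb', 'ccc']
## The default of 32000 keeps each piece under common spreadsheet cell limits.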
def main(args):
global NEWLINE_WITHIN_COLUMN
argsmap = parseargs(args)
files = argsmap.get('files')
if (not files):
sys.exit(0)
summaryfile = argsmap.get("summary")
if (not summaryfile or len(summaryfile) == 0):
print('Summary file must be specified...')
sys.exit(1)
summary_map = get_summary_map(summaryfile[0])
# print(summary_map)
modelfile = argsmap.get("model")
if (not modelfile):
print('Model must be specified...')
sys.exit(1)
modelfile = modelfile[0]
(origmap, sorted_y, vectorizer, le, grid_search) = read_model_file(modelfile)
topics = toc_entries(origmap)
mapper = Mapper(origmap, sorted_y, vectorizer, le, grid_search)
nosplit = argsmap.get('nosplit')
if nosplit == []:
nosplit = True
else:
nosplit = False
if not nosplit:
topic_split_times = argsmap.get('split')
if (not topic_split_times):
topic_split_times = 4
else:
topic_split_times = int(topic_split_times[0])
else:
topic_split_times = 0
NL = argsmap.get('NL') ## Set newline character for multiline columns
if (NL):
NL = NL[0]
if (NL):
NEWLINE_WITHIN_COLUMN = NL
outfile = argsmap.get("out")
if (outfile):
outfile = outfile[0]
exfile = argsmap.get("err")
if exfile:
exfile = exfile[0]
if not exfile:
print("Exception file name must be entered using the --err option...")
sys.exit(1)
ratings_mapper_file = argsmap.get("rmap")
if ratings_mapper_file:
ratings_mapper_file = ratings_mapper_file[0]
if not ratings_mapper_file:
print("Ratings Mapper File file name must be entered using the --rmap option...")
sys.exit(1)
ratings = Ratings(ratings_mapper_file)
global CSV_FIELD_DELIMITER
field_delim = argsmap.get('fd')
if field_delim:
field_delim = field_delim[0]
if field_delim:
CSV_FIELD_DELIMITER = field_delim
global FD_REPLACED
fd_replaced = argsmap.get('fdr')
if fd_replaced:
fd_replaced = fd_replaced[0]
if fd_replaced:
FD_REPLACED = fd_replaced
smodels = argsmap.get("smodels")
stopics = argsmap.get("stopics")
get_headers_for_files(files, topics, mapper, summary_map, outfile, exfile, nosplit, topic_split_times, ratings, smodels, stopics)
if __name__ == '__main__':
args = sys.argv[1:]
main(args)
|
{
"content_hash": "caef39546a20910b6e832b313fde502c",
"timestamp": "",
"source": "github",
"line_count": 605,
"max_line_length": 169,
"avg_line_length": 31.97190082644628,
"alnum_prop": 0.6089024453290596,
"repo_name": "rupendrab/py_unstr_parse",
"id": "2beceb42e5cec9a0cbdc33d7b81f11bb40ea35df",
"size": "19369",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "get_headers_v1.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "133839"
}
],
"symlink_target": ""
}
|
"""Tokenization classes."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import re
import unicodedata
import six
import tensorflow as tf
def validate_case_matches_checkpoint(do_lower_case, init_checkpoint):
"""Checks whether the casing config is consistent with the checkpoint name."""
# The casing has to be passed in by the user and there is no explicit check
# as to whether it matches the checkpoint. The casing information probably
# should have been stored in the bert_config.json file, but it's not, so
# we have to heuristically detect it to validate.
if not init_checkpoint:
return
m = re.match("^.*?([A-Za-z0-9_-]+)/bert_model.ckpt", init_checkpoint)
if m is None:
return
model_name = m.group(1)
lower_models = [
"uncased_L-24_H-1024_A-16", "uncased_L-12_H-768_A-12",
"multilingual_L-12_H-768_A-12", "chinese_L-12_H-768_A-12"
]
cased_models = [
"cased_L-12_H-768_A-12", "cased_L-24_H-1024_A-16",
"multi_cased_L-12_H-768_A-12"
]
is_bad_config = False
if model_name in lower_models and not do_lower_case:
is_bad_config = True
actual_flag = "False"
case_name = "lowercased"
opposite_flag = "True"
if model_name in cased_models and do_lower_case:
is_bad_config = True
actual_flag = "True"
case_name = "cased"
opposite_flag = "False"
if is_bad_config:
raise ValueError(
"You passed in `--do_lower_case=%s` with `--init_checkpoint=%s`. "
"However, `%s` seems to be a %s model, so you "
"should pass in `--do_lower_case=%s` so that the fine-tuning matches "
"how the model was pre-training. If this error is wrong, please "
"just comment out this check." % (actual_flag, init_checkpoint,
model_name, case_name, opposite_flag))
def convert_to_unicode(text):
"""Converts `text` to Unicode (if it's not already), assuming utf-8 input."""
if six.PY3:
if isinstance(text, str):
return text
elif isinstance(text, bytes):
return text.decode("utf-8", "ignore")
else:
raise ValueError("Unsupported string type: %s" % (type(text)))
else:
raise ValueError("Not running on Python 3?")
def printable_text(text):
"""Returns text encoded in a way suitable for print or `tf.logging`."""
# These functions want `str` for both Python2 and Python3, but in one case
# it's a Unicode string and in the other it's a byte string.
if six.PY3:
if isinstance(text, str):
return text
elif isinstance(text, bytes):
return text.decode("utf-8", "ignore")
else:
raise ValueError("Unsupported string type: %s" % (type(text)))
else:
raise ValueError("Not running on Python 3?")
def load_vocab(vocab_file):
"""Loads a vocabulary file into a dictionary."""
vocab = collections.OrderedDict()
index = 0
with tf.gfile.GFile(vocab_file, "r") as reader:
while True:
token = convert_to_unicode(reader.readline())
if not token:
break
token = token.strip()
vocab[token] = index
index += 1
return vocab
def convert_by_vocab(vocab, items):
"""Converts a sequence of [tokens|ids] using the vocab."""
output = []
for item in items:
output.append(vocab[item])
return output
def convert_tokens_to_ids(vocab, tokens):
return convert_by_vocab(vocab, tokens)
def convert_ids_to_tokens(inv_vocab, ids):
return convert_by_vocab(inv_vocab, ids)
def whitespace_tokenize(text):
"""Runs basic whitespace cleaning and splitting on a piece of text."""
text = text.strip()
if not text:
return []
tokens = text.split()
return tokens
class FullTokenizer(object):
"""Runs end-to-end tokenziation."""
def __init__(self, vocab_file, do_lower_case=True):
self.vocab = load_vocab(vocab_file)
self.inv_vocab = {v: k for k, v in self.vocab.items()}
self.basic_tokenizer = BasicTokenizer(do_lower_case=do_lower_case)
self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab)
def tokenize(self, text):
split_tokens = []
for token in self.basic_tokenizer.tokenize(text):
for sub_token in self.wordpiece_tokenizer.tokenize(token):
split_tokens.append(sub_token)
return split_tokens
def convert_tokens_to_ids(self, tokens):
return convert_by_vocab(self.vocab, tokens)
def convert_ids_to_tokens(self, ids):
return convert_by_vocab(self.inv_vocab, ids)
class BasicTokenizer(object):
"""Runs basic tokenization (punctuation splitting, lower casing, etc.)."""
def __init__(self, do_lower_case=True):
"""Constructs a BasicTokenizer.
Args:
do_lower_case: Whether to lower case the input.
"""
self.do_lower_case = do_lower_case
def tokenize(self, text):
"""Tokenizes a piece of text."""
text = convert_to_unicode(text)
text = self._clean_text(text)
# This was added on November 1st, 2018 for the multilingual and Chinese
# models. This is also applied to the English models now, but it doesn't
# matter since the English models were not trained on any Chinese data
# and generally don't have any Chinese data in them (there are Chinese
# characters in the vocabulary because Wikipedia does have some Chinese
# words in the English Wikipedia.).
text = self._tokenize_chinese_chars(text)
orig_tokens = whitespace_tokenize(text)
split_tokens = []
for token in orig_tokens:
if self.do_lower_case:
token = token.lower()
token = self._run_strip_accents(token)
split_tokens.extend(self._run_split_on_punc(token))
output_tokens = whitespace_tokenize(" ".join(split_tokens))
return output_tokens
def _run_strip_accents(self, text):
"""Strips accents from a piece of text."""
text = unicodedata.normalize("NFD", text)
output = []
for char in text:
cat = unicodedata.category(char)
if cat == "Mn":
continue
output.append(char)
return "".join(output)
def _run_split_on_punc(self, text):
"""Splits punctuation on a piece of text."""
chars = list(text)
i = 0
start_new_word = True
output = []
while i < len(chars):
char = chars[i]
if _is_punctuation(char):
output.append([char])
start_new_word = True
else:
if start_new_word:
output.append([])
start_new_word = False
output[-1].append(char)
i += 1
return ["".join(x) for x in output]
def _tokenize_chinese_chars(self, text):
"""Adds whitespace around any CJK character."""
output = []
for char in text:
cp = ord(char)
if self._is_chinese_char(cp):
output.append(" ")
output.append(char)
output.append(" ")
else:
output.append(char)
return "".join(output)
def _is_chinese_char(self, cp):
"""Checks whether CP is the codepoint of a CJK character."""
# This defines a "chinese character" as anything in the CJK Unicode block:
# https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
#
# Note that the CJK Unicode block is NOT all Japanese and Korean characters,
# despite its name. The modern Korean Hangul alphabet is a different block,
# as is Japanese Hiragana and Katakana. Those alphabets are used to write
# space-separated words, so they are not treated specially and handled
    # like all of the other languages.
if ((cp >= 0x4E00 and cp <= 0x9FFF) or #
(cp >= 0x3400 and cp <= 0x4DBF) or #
(cp >= 0x20000 and cp <= 0x2A6DF) or #
(cp >= 0x2A700 and cp <= 0x2B73F) or #
(cp >= 0x2B740 and cp <= 0x2B81F) or #
(cp >= 0x2B820 and cp <= 0x2CEAF) or
(cp >= 0xF900 and cp <= 0xFAFF) or #
(cp >= 0x2F800 and cp <= 0x2FA1F)): #
return True
return False
def _clean_text(self, text):
"""Performs invalid character removal and whitespace cleanup on text."""
output = []
for char in text:
cp = ord(char)
if cp == 0 or cp == 0xfffd or _is_control(char):
continue
if _is_whitespace(char):
output.append(" ")
else:
output.append(char)
return "".join(output)
class WordpieceTokenizer(object):
"""Runs WordPiece tokenziation."""
def __init__(self, vocab, unk_token="[UNK]", max_input_chars_per_word=200):
self.vocab = vocab
self.unk_token = unk_token
self.max_input_chars_per_word = max_input_chars_per_word
def tokenize(self, text):
"""Tokenizes a piece of text into its word pieces.
This uses a greedy longest-match-first algorithm to perform tokenization
using the given vocabulary.
For example:
input = "unaffable"
output = ["un", "##aff", "##able"]
Args:
text: A single token or whitespace separated tokens. This should have
        already been passed through `BasicTokenizer`.
Returns:
A list of wordpiece tokens.
"""
text = convert_to_unicode(text)
output_tokens = []
for token in whitespace_tokenize(text):
chars = list(token)
if len(chars) > self.max_input_chars_per_word:
output_tokens.append(self.unk_token)
continue
is_bad = False
start = 0
sub_tokens = []
while start < len(chars):
end = len(chars)
cur_substr = None
while start < end:
substr = "".join(chars[start:end])
if start > 0:
substr = "##" + substr
if substr in self.vocab:
cur_substr = substr
break
end -= 1
if cur_substr is None:
is_bad = True
break
sub_tokens.append(cur_substr)
start = end
if is_bad:
output_tokens.append(self.unk_token)
else:
output_tokens.extend(sub_tokens)
return output_tokens
def _is_whitespace(char):
"""Checks whether `chars` is a whitespace character."""
  # \t, \n, and \r are technically control characters but we treat them
# as whitespace since they are generally considered as such.
if char == " " or char == "\t" or char == "\n" or char == "\r":
return True
cat = unicodedata.category(char)
if cat == "Zs":
return True
return False
def _is_control(char):
"""Checks whether `chars` is a control character."""
# These are technically control characters but we count them as whitespace
# characters.
if char == "\t" or char == "\n" or char == "\r":
return False
cat = unicodedata.category(char)
if cat.startswith("C"):
return True
return False
def _is_punctuation(char):
"""Checks whether `chars` is a punctuation character."""
cp = ord(char)
# We treat all non-letter/number ASCII as punctuation.
# Characters such as "^", "$", and "`" are not in the Unicode
# Punctuation class but we treat them as punctuation anyways, for
# consistency.
if ((cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or
(cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126)):
return True
cat = unicodedata.category(char)
if cat.startswith("P"):
return True
return False
|
{
"content_hash": "85816234e5a09c02ec1d415a330e6f44",
"timestamp": "",
"source": "github",
"line_count": 371,
"max_line_length": 84,
"avg_line_length": 33.304582210242586,
"alnum_prop": 0.5734865652314665,
"repo_name": "kubeflow/kfserving-lts",
"id": "e5f17ec2035c323d48388a973d19a44b1530ef29",
"size": "12971",
"binary": false,
"copies": "2",
"ref": "refs/heads/release-0.6",
"path": "docs/samples/v1beta1/triton/bert/bert_tokenizer_v2/bert_transformer_v2/tokenization.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "224"
},
{
"name": "Dockerfile",
"bytes": "10549"
},
{
"name": "Go",
"bytes": "1251102"
},
{
"name": "HTML",
"bytes": "17922"
},
{
"name": "JavaScript",
"bytes": "1828"
},
{
"name": "Jsonnet",
"bytes": "2434415"
},
{
"name": "Makefile",
"bytes": "16071"
},
{
"name": "Python",
"bytes": "1860674"
},
{
"name": "SCSS",
"bytes": "1789"
},
{
"name": "Shell",
"bytes": "36788"
},
{
"name": "TypeScript",
"bytes": "78886"
}
],
"symlink_target": ""
}
|
from django.conf.urls.defaults import patterns, url
urlpatterns = patterns('catalog.views',
url(r'(?P<item_slug>.+)$', 'view_item'),
url(r'$', 'index'),
)
|
{
"content_hash": "bcb4a20c6ff311df0a91b8cbd2868de4",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 60,
"avg_line_length": 27.666666666666668,
"alnum_prop": 0.6686746987951807,
"repo_name": "wd5/jangr",
"id": "1de936ee0488da9204c8a3be83f2e2875fb45309",
"size": "190",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "catalog/urls.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
}
|
__all__ = ["interact"]
import jnupy
import sys
def input(prompt=None):
if prompt is not None:
print(prompt, end="")
result = jnupy.input()
if result is None:
raise EOFError
return result
def mp_repl_continue_with_input(line):
# check for blank input
if not line:
return False
# check for escape char in terminal
if "\x1b" in line:
return False
# check if input starts with a certain keyword
starts_with_compound_keyword = False
for keyword in "@", "if", "while", "for", "try", "with", "def", "class":
starts_with_compound_keyword = starts_with_compound_keyword or line.startswith(keyword)
# check for unmatched open bracket or triple quote
# TODO don't look at triple quotes inside single quotes
n_paren = n_brack = n_brace = 0
in_triple_quote = 0
passed = 0
for charno, char in enumerate(line):
if passed:
passed -= 1
continue
elif char == '(': n_paren += 1
elif char == ')': n_paren -= 1
elif char == '[': n_brack += 1
elif char == ']': n_brack -= 1
elif char == '{': n_brace += 1
elif char == '}': n_brace -= 1
elif char == "'":
if chr(in_triple_quote) != '"' and line[charno+1:charno+2] == line[charno+2:charno+3] == "'":
                passed += 2
in_triple_quote = ord("'") - in_triple_quote
elif char == '"':
if chr(in_triple_quote) != "'" and line[charno+1:charno+2] == line[charno+2:charno+3] == '"':
                passed += 2
in_triple_quote = ord('"') - in_triple_quote
# continue if unmatched brackets or quotes
if n_paren > 0 or n_brack > 0 or n_brace > 0 or in_triple_quote != 0:
return True
# continue if last character was backslash (for line continuation)
if line.endswith('\\'):
return True
# continue if compound keyword and last line was not empty
if starts_with_compound_keyword and not line.endswith('\n'):
return True
# otherwise, don't continue
return False
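# Illustrative examples (not part of the original source):
#   mp_repl_continue_with_input("x = 1")       -> False (complete statement)
#   mp_repl_continue_with_input("def f(x):")   -> True  (compound keyword)
#   mp_repl_continue_with_input("print((1 +")  -> True  (unmatched bracket)
#   mp_repl_continue_with_input('s = """ab')   -> True  (open triple quote)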
def interact(banner=None, readfunc=None, local=None):
if readfunc is None:
readfunc = input
if banner is None:
banner = "Micro Python {} on {}; {} version".format(jnupy.get_version("MICROPY_GIT_TAG"), jnupy.get_version("MICROPY_BUILD_DATE"), sys.platform)
if local is None:
local = dict()
print(banner)
while True:
try:
code = readfunc(">>> ")
except EOFError:
print()
continue
while mp_repl_continue_with_input(code):
try:
code += "\n" + readfunc("... ")
except EOFError:
print()
continue
try:
fun = compile(code, "<stdin>", "single")
except SyntaxError:
sys.print_exception(sys.exc_info()[1])
continue
try:
exec(fun, local, local)
except SystemExit:
raise
except:
sys.print_exception(sys.exc_info()[1])
|
{
"content_hash": "51e26f0449ea30e2a8518f14d8884bbc",
"timestamp": "",
"source": "github",
"line_count": 106,
"max_line_length": 152,
"avg_line_length": 29.78301886792453,
"alnum_prop": 0.5388026607538803,
"repo_name": "EcmaXp/micropython",
"id": "ed2b256e68325e22f43151fb1b63fcf6569b110d",
"size": "3157",
"binary": false,
"copies": "1",
"ref": "refs/heads/opencom",
"path": "opencom/lib/code.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "54659"
},
{
"name": "C",
"bytes": "34008975"
},
{
"name": "C++",
"bytes": "620361"
},
{
"name": "HTML",
"bytes": "84456"
},
{
"name": "Makefile",
"bytes": "72080"
},
{
"name": "Objective-C",
"bytes": "392029"
},
{
"name": "Python",
"bytes": "554775"
},
{
"name": "Shell",
"bytes": "4829"
}
],
"symlink_target": ""
}
|
import functools
import re
import string
import typing as t
if t.TYPE_CHECKING:
import typing_extensions as te
class HasHTML(te.Protocol):
def __html__(self) -> str:
pass
__version__ = "2.1.2.dev0"
_strip_comments_re = re.compile(r"<!--.*?-->", re.DOTALL)
_strip_tags_re = re.compile(r"<.*?>", re.DOTALL)
def _simple_escaping_wrapper(name: str) -> t.Callable[..., "Markup"]:
orig = getattr(str, name)
@functools.wraps(orig)
def wrapped(self: "Markup", *args: t.Any, **kwargs: t.Any) -> "Markup":
args = _escape_argspec(list(args), enumerate(args), self.escape) # type: ignore
_escape_argspec(kwargs, kwargs.items(), self.escape)
return self.__class__(orig(self, *args, **kwargs))
return wrapped
class Markup(str):
"""A string that is ready to be safely inserted into an HTML or XML
document, either because it was escaped or because it was marked
safe.
Passing an object to the constructor converts it to text and wraps
it to mark it safe without escaping. To escape the text, use the
:meth:`escape` class method instead.
>>> Markup("Hello, <em>World</em>!")
Markup('Hello, <em>World</em>!')
>>> Markup(42)
Markup('42')
>>> Markup.escape("Hello, <em>World</em>!")
Markup('Hello <em>World</em>!')
This implements the ``__html__()`` interface that some frameworks
use. Passing an object that implements ``__html__()`` will wrap the
output of that method, marking it safe.
>>> class Foo:
... def __html__(self):
... return '<a href="/foo">foo</a>'
...
>>> Markup(Foo())
Markup('<a href="/foo">foo</a>')
This is a subclass of :class:`str`. It has the same methods, but
escapes their arguments and returns a ``Markup`` instance.
>>> Markup("<em>%s</em>") % ("foo & bar",)
Markup('<em>foo & bar</em>')
>>> Markup("<em>Hello</em> ") + "<foo>"
Markup('<em>Hello</em> <foo>')
"""
__slots__ = ()
def __new__(
cls, base: t.Any = "", encoding: t.Optional[str] = None, errors: str = "strict"
) -> "Markup":
if hasattr(base, "__html__"):
base = base.__html__()
if encoding is None:
return super().__new__(cls, base)
return super().__new__(cls, base, encoding, errors)
def __html__(self) -> "Markup":
return self
def __add__(self, other: t.Union[str, "HasHTML"]) -> "Markup":
if isinstance(other, str) or hasattr(other, "__html__"):
return self.__class__(super().__add__(self.escape(other)))
return NotImplemented
def __radd__(self, other: t.Union[str, "HasHTML"]) -> "Markup":
if isinstance(other, str) or hasattr(other, "__html__"):
return self.escape(other).__add__(self)
return NotImplemented
def __mul__(self, num: "te.SupportsIndex") -> "Markup":
if isinstance(num, int):
return self.__class__(super().__mul__(num))
return NotImplemented
__rmul__ = __mul__
def __mod__(self, arg: t.Any) -> "Markup":
if isinstance(arg, tuple):
# a tuple of arguments, each wrapped
arg = tuple(_MarkupEscapeHelper(x, self.escape) for x in arg)
elif hasattr(type(arg), "__getitem__") and not isinstance(arg, str):
# a mapping of arguments, wrapped
arg = _MarkupEscapeHelper(arg, self.escape)
else:
# a single argument, wrapped with the helper and a tuple
arg = (_MarkupEscapeHelper(arg, self.escape),)
return self.__class__(super().__mod__(arg))
def __repr__(self) -> str:
return f"{self.__class__.__name__}({super().__repr__()})"
def join(self, seq: t.Iterable[t.Union[str, "HasHTML"]]) -> "Markup":
return self.__class__(super().join(map(self.escape, seq)))
join.__doc__ = str.join.__doc__
def split( # type: ignore
self, sep: t.Optional[str] = None, maxsplit: int = -1
) -> t.List["Markup"]:
return [self.__class__(v) for v in super().split(sep, maxsplit)]
split.__doc__ = str.split.__doc__
def rsplit( # type: ignore
self, sep: t.Optional[str] = None, maxsplit: int = -1
) -> t.List["Markup"]:
return [self.__class__(v) for v in super().rsplit(sep, maxsplit)]
rsplit.__doc__ = str.rsplit.__doc__
def splitlines(self, keepends: bool = False) -> t.List["Markup"]: # type: ignore
return [self.__class__(v) for v in super().splitlines(keepends)]
splitlines.__doc__ = str.splitlines.__doc__
def unescape(self) -> str:
"""Convert escaped markup back into a text string. This replaces
HTML entities with the characters they represent.
>>> Markup("Main » <em>About</em>").unescape()
'Main » <em>About</em>'
"""
from html import unescape
return unescape(str(self))
def striptags(self) -> str:
""":meth:`unescape` the markup, remove tags, and normalize
whitespace to single spaces.
>>> Markup("Main »\t<em>About</em>").striptags()
'Main » About'
"""
# Use two regexes to avoid ambiguous matches.
value = _strip_comments_re.sub("", self)
value = _strip_tags_re.sub("", value)
value = " ".join(value.split())
return Markup(value).unescape()
@classmethod
def escape(cls, s: t.Any) -> "Markup":
"""Escape a string. Calls :func:`escape` and ensures that for
subclasses the correct type is returned.
"""
rv = escape(s)
if rv.__class__ is not cls:
return cls(rv)
return rv
for method in (
"__getitem__",
"capitalize",
"title",
"lower",
"upper",
"replace",
"ljust",
"rjust",
"lstrip",
"rstrip",
"center",
"strip",
"translate",
"expandtabs",
"swapcase",
"zfill",
):
locals()[method] = _simple_escaping_wrapper(method)
del method
def partition(self, sep: str) -> t.Tuple["Markup", "Markup", "Markup"]:
l, s, r = super().partition(self.escape(sep))
cls = self.__class__
return cls(l), cls(s), cls(r)
def rpartition(self, sep: str) -> t.Tuple["Markup", "Markup", "Markup"]:
l, s, r = super().rpartition(self.escape(sep))
cls = self.__class__
return cls(l), cls(s), cls(r)
def format(self, *args: t.Any, **kwargs: t.Any) -> "Markup":
formatter = EscapeFormatter(self.escape)
return self.__class__(formatter.vformat(self, args, kwargs))
def __html_format__(self, format_spec: str) -> "Markup":
if format_spec:
raise ValueError("Unsupported format specification for Markup.")
return self
class EscapeFormatter(string.Formatter):
__slots__ = ("escape",)
def __init__(self, escape: t.Callable[[t.Any], Markup]) -> None:
self.escape = escape
super().__init__()
def format_field(self, value: t.Any, format_spec: str) -> str:
if hasattr(value, "__html_format__"):
rv = value.__html_format__(format_spec)
elif hasattr(value, "__html__"):
if format_spec:
raise ValueError(
f"Format specifier {format_spec} given, but {type(value)} does not"
" define __html_format__. A class that defines __html__ must define"
" __html_format__ to work with format specifiers."
)
rv = value.__html__()
else:
# We need to make sure the format spec is str here as
# otherwise the wrong callback methods are invoked.
rv = string.Formatter.format_field(self, value, str(format_spec))
return str(self.escape(rv))
_ListOrDict = t.TypeVar("_ListOrDict", list, dict)
def _escape_argspec(
obj: _ListOrDict, iterable: t.Iterable[t.Any], escape: t.Callable[[t.Any], Markup]
) -> _ListOrDict:
"""Helper for various string-wrapped functions."""
for key, value in iterable:
if isinstance(value, str) or hasattr(value, "__html__"):
obj[key] = escape(value)
return obj
class _MarkupEscapeHelper:
"""Helper for :meth:`Markup.__mod__`."""
__slots__ = ("obj", "escape")
def __init__(self, obj: t.Any, escape: t.Callable[[t.Any], Markup]) -> None:
self.obj = obj
self.escape = escape
def __getitem__(self, item: t.Any) -> "_MarkupEscapeHelper":
return _MarkupEscapeHelper(self.obj[item], self.escape)
def __str__(self) -> str:
return str(self.escape(self.obj))
def __repr__(self) -> str:
return str(self.escape(repr(self.obj)))
def __int__(self) -> int:
return int(self.obj)
def __float__(self) -> float:
return float(self.obj)
# circular import
try:
from ._speedups import escape as escape
from ._speedups import escape_silent as escape_silent
from ._speedups import soft_str as soft_str
except ImportError:
from ._native import escape as escape
from ._native import escape_silent as escape_silent # noqa: F401
from ._native import soft_str as soft_str # noqa: F401
|
{
"content_hash": "8910359d03910ba5f411af39834040d0",
"timestamp": "",
"source": "github",
"line_count": 295,
"max_line_length": 88,
"avg_line_length": 31.55593220338983,
"alnum_prop": 0.5657965409818455,
"repo_name": "pallets/markupsafe",
"id": "1f9d8cdbacc608f364b9caa956fbf7b0e7272e57",
"size": "9311",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "src/markupsafe/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "7083"
},
{
"name": "Python",
"bytes": "23025"
}
],
"symlink_target": ""
}
|
import re
from oslo_log import log
from arsenal.director import openstack_scout
LOG = log.getLogger(__name__)
def get_pyrax_token(**kwargs):
# NOTE(ClifHouck) support Rackspace-specific auth for OnMetal.
# I'm refusing to put pyrax into requirements for Arsenal, because Arsenal
# should not be Rackspace-centric.
try:
import pyrax
except ImportError as e:
LOG.error("Could not import pyrax for OnMetalScout. "
"Please install pyrax!")
raise e
pyrax.set_setting('identity_type', 'rackspace')
pyrax.set_setting('auth_endpoint', kwargs.get('auth_url'))
pyrax.set_credentials(kwargs.get('username'), kwargs.get('password'))
return pyrax.identity.auth_token
KNOWN_V1_FLAVORS = {
'onmetal-compute1': lambda node: node.properties['memory_mb'] == 32768,
'onmetal-io1': lambda node: node.properties['memory_mb'] == 131072,
'onmetal-memory1': lambda node: node.properties['memory_mb'] == 524288,
}
def is_onmetal_image(glance_image, specific_flavor_class):
flavor_classes = glance_image.get('flavor_classes')
# Sometimes an image will have no class! Shocking!
if flavor_classes is None:
return False
return ('!onmetal' not in flavor_classes and
specific_flavor_class in flavor_classes and
glance_image.get('vm_mode') == 'metal' and
glance_image.get('visibility') == 'public')
def is_onmetal_v1_image(glance_image):
return is_onmetal_image(glance_image, 'onmetal')
ONMETAL_V1_FLAVOR_NAME_REGEX = re.compile('onmetal-[a-z-]+1')
def is_onmetal_v1_flavor(flavor):
match_result = ONMETAL_V1_FLAVOR_NAME_REGEX.match(flavor.id)
return match_result is not None
class OnMetalV1Scout(openstack_scout.OpenstackScout):
"""Scouts and filters data for the OnMetal V1 Rackspace service."""
def __init__(self):
super(OnMetalV1Scout, self).__init__(
flavor_filter=is_onmetal_v1_flavor,
image_filter=is_onmetal_v1_image,
glance_auth_token_func=get_pyrax_token,
known_flavors=KNOWN_V1_FLAVORS)
def is_v2_flavor_generic(ironic_node,
expected_memory_mb,
expected_local_gb,
expected_cpus):
properties = ironic_node.get('properties')
if properties is None:
return False
memory_mb = properties.get('memory_mb')
local_gb = properties.get('local_gb')
cpus = properties.get('cpus')
return (memory_mb == expected_memory_mb and
local_gb == expected_local_gb and
cpus == expected_cpus)
KNOWN_V2_FLAVORS = {
'onmetal-general2-small':
lambda node: is_v2_flavor_generic(node, 32768, 800, 12),
'onmetal-general2-medium':
lambda node: is_v2_flavor_generic(node, 65536, 800, 24),
'onmetal-general2-large':
lambda node: is_v2_flavor_generic(node, 131072, 800, 24),
'onmetal-io2':
lambda node: is_v2_flavor_generic(node, 131072, 120, 40),
}
def is_onmetal_v2_image(glance_image):
return is_onmetal_image(glance_image, 'onmetal2')
ONMETAL_V2_FLAVOR_NAME_REGEX = re.compile('onmetal-[a-z-]+2')
def is_onmetal_v2_flavor(flavor):
match_result = ONMETAL_V2_FLAVOR_NAME_REGEX.match(flavor.id)
return match_result is not None
class OnMetalV2Scout(openstack_scout.OpenstackScout):
"""Scouts and filters data for the OnMetal V2 Rackspace service."""
def __init__(self):
super(OnMetalV2Scout, self).__init__(
flavor_filter=is_onmetal_v2_flavor,
image_filter=is_onmetal_v2_image,
glance_auth_token_func=get_pyrax_token,
known_flavors=KNOWN_V2_FLAVORS)
|
{
"content_hash": "c868540a6b58d03df03f9d3aad4ba01b",
"timestamp": "",
"source": "github",
"line_count": 118,
"max_line_length": 78,
"avg_line_length": 31.440677966101696,
"alnum_prop": 0.6520215633423181,
"repo_name": "rackerlabs/arsenal",
"id": "102187e5c5d25db4dffe32a8b667706f1d735912",
"size": "4363",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "arsenal/director/onmetal_scout.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "190461"
},
{
"name": "Shell",
"bytes": "8033"
}
],
"symlink_target": ""
}
|
from __future__ import division
from common import recover_xor_key, score_text
from itertools import cycle, izip, islice, tee
# Taken from https://docs.python.org/2/library/itertools.html#recipes
def _pairwise(iterable):
"""s -> (s0,s1), (s1,s2), (s2, s3), ..."""
a, b = tee(iterable)
next(b, None)
return izip(a, b)
def hamming_distance(str1, str2):
"""Calculate the hamming distance of two strings."""
return sum([
bin(ord(chr1) ^ ord(chr2)).count("1")
for chr1, chr2 in zip(str1, str2)
])
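# Sanity check, using the known value from the challenge text:
# hamming_distance("this is a test", "wokka wokka!!!") == 37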
def block_iterator(text, block_size):
"""Group text into a set of block_size blocks."""
return izip(*([iter(text)] * block_size))
def block_hamming_distance(text, start, stop):
"""Return a sorted list of hamming distances for n-length blocks."""
blocksize_scores = []
for blocksize in range(start, stop):
blocks = islice(block_iterator(text, blocksize), 4)
distance_scores = [
hamming_distance(block1, block2) / blocksize
for block1, block2 in _pairwise(blocks)
]
avg_score = sum(distance_scores) / len(distance_scores)
blocksize_scores.append((blocksize, avg_score))
return sorted(blocksize_scores, key = lambda pair: pair[1])
def transpose(iterable):
"""[[abc], [def], [ghi]] => [[adf], [beh], [cfi]]"""
return [''.join(block) for block in zip(*iterable)]
def repeating_xor_decrypt(text, key):
return ''.join([chr(ord(a) ^ ord(k)) for a, k in zip(text, cycle(key))])
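# XOR is its own inverse, so the same function also encrypts. Illustrative
# round trip (not part of the original source):
# repeating_xor_decrypt(repeating_xor_decrypt("hello", "key"), "key") == "hello"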
def break_repeating_key_xor(ciphertext):
# Calculate the edit distance for keysizes ranging from 2 to 40
keysize_scores = block_hamming_distance(ciphertext, 2, 40)
best_key = None
best_score = None
# Try the 3 keysizes with the lowest edit distance
for keysize,_ in keysize_scores[:3]:
# Transpose the blocks
key_blocks = transpose(block_iterator(ciphertext, keysize))
# Guess the single char xor key for each block and assemble the key
key = ''.join([
recover_xor_key(block)[0] for block in key_blocks
])
# Decrypt into plaintext and score the result
plaintext = repeating_xor_decrypt(ciphertext, key)
score = score_text(plaintext)
        if best_score is None or best_score > score:
            best_score = score
            best_key = key
    return best_key
def test(ciphertext, expected_key):
key = break_repeating_key_xor(ciphertext)
assert key == expected_key
|
{
"content_hash": "286e41ce3516b1a684a29113f1594a46",
"timestamp": "",
"source": "github",
"line_count": 79,
"max_line_length": 76,
"avg_line_length": 31.645569620253166,
"alnum_prop": 0.6328,
"repo_name": "ericnorris/cryptopals-solutions",
"id": "10c0fd63026f76dc727f4c69dc4b69f901615547",
"size": "2500",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cryptopals/set1/challenge_06.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "34169"
}
],
"symlink_target": ""
}
|
from sippy.SipConf import SipConf
try:
from urllib import quote, unquote
except ImportError:
from urllib.parse import quote, unquote
RFC3261_USER_UNRESERVED = '&=+$,;?/#'
# Quote from RFC-3261:
# Several rules are incorporated from RFC 2396 [5] but are updated to
# make them compliant with RFC 2234
RFC3261_MARK = '-_.!~*\'()'
USERNAME_SAFE = RFC3261_USER_UNRESERVED + RFC3261_MARK
class SipURL(object):
scheme = None
username = None
userparams = None
password = None
host = None
port = None
headers = None
usertype = None
transport = None
ttl = None
maddr = None
method = None
tag = None
other = None
lr = False
def __init__(self, url = None, username = None, password = None, host = None, port = None, headers = None, \
usertype = None, transport = None, ttl = None, maddr = None, method = None, tag = None, other = None, \
userparams = None, lr = False, relaxedparser = False, scheme = "sip"):
self.original_uri = url
self.other = []
self.userparams = []
if url == None:
self.scheme = scheme
self.username = username
if userparams != None:
self.userparams = userparams
self.password = password
if host == None:
self.host = SipConf.my_address
self.port = SipConf.my_port
else:
self.host = host
self.port = port
self.headers = headers
self.usertype = usertype
self.transport = transport
self.ttl = ttl
self.maddr = maddr
self.method = method
self.tag = tag
if other != None:
self.other = other
self.lr = lr
return
parts = url.split(':', 1)
if len(parts) < 2:
# scheme is missing, assume sip:
parts.insert(0, 'sip')
parts[0] = parts[0].lower()
if parts[0] not in ('sip', 'sips', 'tel'):
raise ValueError('unsupported scheme: %s:' % parts[0])
self.scheme, url = parts
if self.scheme == 'tel':
if SipConf.autoconvert_tel_url:
self.convertTelURL(url, relaxedparser)
else:
raise ValueError('tel: scheme is not supported')
else:
self.parseSipURL(url, relaxedparser)
def convertTelURL(self, url, relaxedparser):
self.scheme = 'sip'
if relaxedparser:
self.host = ''
else:
self.host = SipConf.my_address
self.port = SipConf.my_port
parts = url.split(';')
self.username = unquote(parts[0])
if len(parts) > 1:
# parse userparams
self.userparams = []
for part in parts[1:]:
# The RFC-3261 suggests the user parameter keys should
# be converted to lower case.
k, v = part.split('=')
self.userparams.append(k.lower() + '=' + v)
def parseSipURL(self, url, relaxedparser):
ear = url.find('@') + 1
parts = url[ear:].split(';')
userdomain, params = url[0:ear] + parts[0], parts[1:]
if len(params) == 0 and '?' in userdomain[ear:]:
self.headers = {}
userdomain_suff, headers = userdomain[ear:].split('?', 1)
userdomain = userdomain[:ear] + userdomain_suff
for header in headers.split('&'):
k, v = header.split('=')
self.headers[k] = unquote(v)
if ear > 0:
userpass = userdomain[:ear - 1]
hostport = userdomain[ear:]
upparts = userpass.split(':', 1)
if len(upparts) > 1:
self.password = upparts[1]
uparts = upparts[0].split(';')
if len(uparts) > 1:
self.userparams = uparts[1:]
self.username = unquote(uparts[0])
else:
hostport = userdomain
parseport = None
if relaxedparser and len(hostport) == 0:
self.host = ''
elif hostport[0] == '[':
# IPv6 host
hpparts = hostport.split(']', 1)
self.host = hpparts[0] + ']'
if len(hpparts[1]) > 0:
hpparts = hpparts[1].split(':', 1)
if len(hpparts) > 1:
parseport = hpparts[1]
else:
# IPv4 host
hpparts = hostport.split(':', 1)
if len(hpparts) == 1:
self.host = hpparts[0]
else:
self.host = hpparts[0]
parseport = hpparts[1]
if parseport != None:
try:
self.port = int(parseport)
except Exception as e:
# Can't parse port number, check why
port = parseport.strip()
if len(port) == 0:
# Bug on the other side, work around it
print('WARNING: non-compliant URI detected, empty port number, ' \
'assuming default: "%s"' % str(self.original_uri))
elif port.find(':') > 0:
pparts = port.split(':', 1)
if pparts[0] == pparts[1]:
# Bug on the other side, work around it
print('WARNING: non-compliant URI detected, duplicate port number, ' \
'taking "%s": %s' % (pparts[0], str(self.original_uri)))
self.port = int(pparts[0])
else:
raise e
else:
raise e
if len(params) > 0:
last_param = params[-1]
arr = last_param.split('?', 1)
params[-1] = arr[0]
self.setParams(params)
if len(arr) == 2:
self.headers = {}
for header in arr[1].split('&'):
k, v = header.split('=')
self.headers[k] = unquote(v)
def setParams(self, params):
self.usertype = None
self.transport = None
self.ttl = None
self.maddr = None
self.method = None
self.tag = None
self.other = []
self.lr = False
for p in params:
nv = p.split('=', 1)
if len(nv) == 1:
if p == 'lr':
self.lr = True
else:
self.other.append(p)
continue
name, value = nv
if name == 'user':
self.usertype = value
elif name == 'transport':
self.transport = value
elif name == 'ttl':
self.ttl = int(value)
elif name == 'maddr':
self.maddr = value
elif name == 'method':
self.method = value
elif name == 'tag':
self.tag = value
elif name == 'lr':
# RFC 3261 doesn't allow lr parameter to have a value,
                # but many stupid implementations do it anyway
self.lr = True
else:
self.other.append(p)
def __str__(self):
return self.localStr()
def localStr(self, local_addr = None, local_port = None):
l = []; w = l.append
w(self.scheme + ':')
if self.username != None:
w(quote(self.username, USERNAME_SAFE))
for v in self.userparams:
w(';%s' % v)
if self.password != None:
w(':%s' % self.password)
w('@')
if local_addr != None and 'my' in dir(self.host):
w(local_addr)
else:
w(str(self.host))
if self.port != None:
if local_port != None and 'my' in dir(self.port):
w(':%d' % local_port)
else:
w(':%d' % self.port)
for p in self.getParams():
w(';%s' % p)
if self.headers:
w('?')
w('&'.join([('%s=%s' % (h, quote(v))) for (h, v) in self.headers.items()]))
return ''.join(l)
def getParams(self):
res = []; w = res.append
if self.usertype != None:
w('user=%s' % self.usertype)
for n in ('transport', 'ttl', 'maddr', 'method', 'tag'):
v = getattr(self, n)
if v != None:
w('%s=%s' % (n, v))
for v in self.other:
w(v)
if self.lr:
w('lr')
return res
def getCopy(self):
return SipURL(username = self.username, password = self.password, host = self.host, port = self.port, \
headers = self.headers, usertype = self.usertype, transport = self.transport, ttl = self.ttl, \
maddr = self.maddr, method = self.method, tag = self.tag, other = list(self.other), \
userparams = list(self.userparams), lr = self.lr)
def getHost(self):
return self.host
def getPort(self):
if self.port != None:
return self.port
else:
return SipConf.default_port
def getAddr(self):
if self.port != None:
return (self.host, self.port)
else:
return (self.host, SipConf.default_port)
def setAddr(self, addr):
self.host, self.port = addr
if __name__ == '__main__':
import sys
test_set = (('sip:user;par=u%40example.net@example.com', ()), \
('sip:user@example.com?Route=%3Csip:example.com%3E', ()), \
('sip:[2001:db8::10]', ()), \
('sip:[2001:db8::10]:5070', ()), \
('sip:user@example.net;tag=9817--94', ('tag=9817--94',)), \
('sip:alice@atlanta.com;ttl=15;maddr=239.255.255.1', ('ttl=15', 'maddr=239.255.255.1')), \
('sip:alice:secretword@atlanta.com;transport=tcp', ('transport=tcp',)), \
('sip:alice@atlanta.com?subject=project%20x&priority=urgent', ()), \
('sip:+1-212-555-1212:1234@gateway.com;user=phone', ('user=phone',)), \
('sip:atlanta.com;method=REGISTER?to=alice%40atlanta.com', ('method=REGISTER',)), \
('sip:alice;day=tuesday@atlanta.com', ()), \
('sip:+611234567890@ims.mnc000.mcc000.3gppnetwork.org;user=phone;npdi', ('user=phone', 'npdi')), \
('sip:1234#567890@example.com', ()), \
('sip:foo@1.2.3.4:', ()), \
('sip:foo@1.2.3.4:5060:5060', ()))
for u, mp in test_set:
su = SipURL(u)
sp = su.getParams()
print(tuple(sp), mp, su.getHost(), su.getPort())
if str(su) != u:
sys.stderr.write('URI cannot be reconstructed precisely: expected \'%s\' got \'%s\'\n' % (u, str(su)))
|
{
"content_hash": "5641ca1af7805dc4d1825358d546ad8e",
"timestamp": "",
"source": "github",
"line_count": 298,
"max_line_length": 114,
"avg_line_length": 36.04362416107382,
"alnum_prop": 0.4854296620426404,
"repo_name": "sippy/b2bua",
"id": "1708de01344601b660135dccd0207ea60981d920",
"size": "12172",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sippy/SipURL.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "584478"
},
{
"name": "Shell",
"bytes": "445"
}
],
"symlink_target": ""
}
|
"""Validate manifests."""
import argparse
from pathlib import Path
import subprocess
import sys
from . import gather_info, generate, error, docs
from .const import COMPONENT_DIR
TEMPLATES = [
p.name for p in (Path(__file__).parent / "templates").glob("*") if p.is_dir()
]
def valid_integration(integration):
"""Test if it's a valid integration."""
if not (COMPONENT_DIR / integration).exists():
raise argparse.ArgumentTypeError(
f"The integration {integration} does not exist."
)
return integration
def get_arguments() -> argparse.Namespace:
"""Get parsed passed in arguments."""
parser = argparse.ArgumentParser(description="Home Assistant Scaffolder")
parser.add_argument("template", type=str, choices=TEMPLATES)
parser.add_argument(
"--develop", action="store_true", help="Automatically fill in info"
)
parser.add_argument(
"--integration", type=valid_integration, help="Integration to target."
)
arguments = parser.parse_args()
return arguments
def main():
"""Scaffold an integration."""
if not Path("requirements_all.txt").is_file():
print("Run from project root")
return 1
args = get_arguments()
info = gather_info.gather_info(args)
generate.generate(args.template, info)
# If creating new integration, create config flow too
if args.template == "integration":
if info.authentication or not info.discoverable:
template = "config_flow"
else:
template = "config_flow_discovery"
generate.generate(template, info)
print("Running hassfest to pick up new information.")
subprocess.run("python -m script.hassfest", shell=True)
print()
print("Running tests")
print(f"$ pytest -vvv tests/components/{info.domain}")
if (
subprocess.run(
f"pytest -vvv tests/components/{info.domain}", shell=True
).returncode
!= 0
):
return 1
print()
print(f"Done!")
docs.print_relevant_docs(args.template, info)
return 0
if __name__ == "__main__":
try:
sys.exit(main())
except error.ExitApp as err:
print()
print(f"Fatal Error: {err.reason}")
sys.exit(err.exit_code)
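# Hedged usage sketch (not part of the original script): from the
# repository root, scaffold a new integration with prefilled info:
#
#   python -m script.scaffold integration --develop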
|
{
"content_hash": "c79064a83749a6a530fa39bb209e00f3",
"timestamp": "",
"source": "github",
"line_count": 91,
"max_line_length": 81,
"avg_line_length": 25.12087912087912,
"alnum_prop": 0.6325459317585301,
"repo_name": "Cinntax/home-assistant",
"id": "2258840f430d927f537c974eb9c161f456bf5c3c",
"size": "2286",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "script/scaffold/__main__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "17374056"
},
{
"name": "Shell",
"bytes": "6792"
}
],
"symlink_target": ""
}
|
HIPCHAT_ADDON_KEY = 'io.close.hipchat-addon'
HIPCHAT_ADDON_NAME = 'Close.io'
HIPCHAT_ADDON_DESCRIPTION = 'A HipChat add-on to give details about a Close.io lead when its URL is mentioned in HipChat'
HIPCHAT_ADDON_VENDOR_URL = 'http://close.io'
HIPCHAT_ADDON_VENDOR_NAME = 'Close.io'
#HIPCHAT_ADDON_BASE_URL = 'https://closeio-hipchat.herokuapp.com' # no trailing slash
CACHE_TYPE = 'simple'
DEBUG = False
|
{
"content_hash": "5c4310321b284924053d09417c091c28",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 121,
"avg_line_length": 40.5,
"alnum_prop": 0.7506172839506173,
"repo_name": "elasticsales/closeio-hipchat-addon",
"id": "2a337f862043fe2410f416be1b6aacd333808485",
"size": "458",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "settings.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "3237"
},
{
"name": "Python",
"bytes": "4867"
}
],
"symlink_target": ""
}
|
""" This is the example demonstrates how to use Kernel Tuner
to insert tunable parameters into template arguments
"""
import json
import numpy
from kernel_tuner import tune_kernel
def tune():
kernel_string = """
template<typename T, int blockSize>
__global__ void vector_add(T *c, T *a, T *b, int n) {
auto i = blockIdx.x * blockSize + threadIdx.x;
if (i<n) {
c[i] = a[i] + b[i];
}
}
"""
size = 10000000
a = numpy.random.randn(size).astype(numpy.float32)
b = numpy.random.randn(size).astype(numpy.float32)
c = numpy.zeros_like(b)
n = numpy.int32(size)
args = [c, a, b, n]
tune_params = dict()
tune_params["block_size_x"] = [128+64*i for i in range(15)]
result, env = tune_kernel("vector_add<float, block_size_x>", kernel_string, size, args, tune_params)
with open("vector_add.json", 'w') as fp:
json.dump(result, fp)
return result
if __name__ == "__main__":
tune()
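# Hedged usage sketch (assumes tune_kernel's documented return value: a
# list of dicts holding each configuration's tunable parameters plus a
# 'time' entry):
#
# results = tune()
# best = min(results, key=lambda cfg: cfg['time'])
# print('fastest block size:', best['block_size_x'])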
|
{
"content_hash": "d101ece3fd29dabce90b78578ac1f03f",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 104,
"avg_line_length": 22.88095238095238,
"alnum_prop": 0.6118626430801248,
"repo_name": "benvanwerkhoven/kernel_tuner",
"id": "5d2d42c540021be2de825bcddc8ca945d057617a",
"size": "983",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/cuda-c++/vector_add_blocksize.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Cuda",
"bytes": "3766"
},
{
"name": "Python",
"bytes": "425339"
}
],
"symlink_target": ""
}
|
import proto # type: ignore
__protobuf__ = proto.module(
package="google.ads.googleads.v12.errors",
marshal="google.ads.googleads.v12",
manifest={"CustomerClientLinkErrorEnum",},
)
class CustomerClientLinkErrorEnum(proto.Message):
r"""Container for enum describing possible CustomeClientLink
errors.
"""
class CustomerClientLinkError(proto.Enum):
r"""Enum describing possible CustomerClientLink errors."""
UNSPECIFIED = 0
UNKNOWN = 1
CLIENT_ALREADY_INVITED_BY_THIS_MANAGER = 2
CLIENT_ALREADY_MANAGED_IN_HIERARCHY = 3
CYCLIC_LINK_NOT_ALLOWED = 4
CUSTOMER_HAS_TOO_MANY_ACCOUNTS = 5
CLIENT_HAS_TOO_MANY_INVITATIONS = 6
CANNOT_HIDE_OR_UNHIDE_MANAGER_ACCOUNTS = 7
CUSTOMER_HAS_TOO_MANY_ACCOUNTS_AT_MANAGER = 8
CLIENT_HAS_TOO_MANY_MANAGERS = 9
__all__ = tuple(sorted(__protobuf__.manifest))
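# Hedged usage sketch (not part of the generated file): proto-plus enums
# behave like IntEnum, so a raw error code returned by the API can be
# mapped back to its symbolic name.
#
# err = CustomerClientLinkErrorEnum.CustomerClientLinkError(2)
# assert err.name == 'CLIENT_ALREADY_INVITED_BY_THIS_MANAGER'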
|
{
"content_hash": "7d9b33cf7254a1ec9c2626842e8ab5af",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 66,
"avg_line_length": 29.35483870967742,
"alnum_prop": 0.6703296703296703,
"repo_name": "googleads/google-ads-python",
"id": "9432211967f9df06a41e8091c09c7d8140431c3f",
"size": "1510",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "google/ads/googleads/v12/errors/types/customer_client_link_error.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "23399881"
}
],
"symlink_target": ""
}
|
from setuptools import setup, find_packages
setup(
name="PyEXR",
version="0.3.10",
description="One line EXR manipulation library",
author="Thijs Vogels",
author_email="t.vogels@me.com",
url="https://github.com/tvogels/pyexr",
install_requires=["OpenEXR", "numpy", "future"],
dependency_links=[
"https://github.com/jamesbowman/openexrpython/tarball/master#egg=OpenEXR-1.3.0"
],
packages=find_packages(),
)
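# Hedged usage sketch for the packaged library (API assumed from the
# project README, not part of this setup script): read an EXR file into
# a numpy array and write it back out.
#
# import pyexr
# img = pyexr.read("input.exr")
# pyexr.write("output.exr", img)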
|
{
"content_hash": "ec199c383fe2cf00dc1363e278f090ef",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 87,
"avg_line_length": 30.333333333333332,
"alnum_prop": 0.6659340659340659,
"repo_name": "tvogels/pyexr",
"id": "86e1b9489646e8fd4e5435c7240d4e118328c28b",
"size": "455",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "10342"
}
],
"symlink_target": ""
}
|
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Deleting field 'PageWidget.width'
db.delete_column(u'director_pagewidget', 'width')
# Adding field 'PageWidget.theme'
db.add_column(u'director_pagewidget', 'theme',
self.gf('django.db.models.fields.CharField')(default='Thin', max_length=10),
keep_default=False)
# Adding field 'PageWidget.data_json'
db.add_column(u'director_pagewidget', 'data_json',
self.gf('django.db.models.fields.TextField')(null=True, blank=True),
keep_default=False)
# Adding field 'PageWidget.render_function'
db.add_column(u'director_pagewidget', 'render_function',
self.gf('django.db.models.fields.CharField')(default='notesAndChildNotes', max_length=60),
keep_default=False)
# Adding field 'DashboardWidgets.width'
db.add_column(u'director_dashboardwidgets', 'width',
self.gf('django.db.models.fields.IntegerField')(default=6, max_length=2),
keep_default=False)
# Adding field 'DashboardWidgets.height'
db.add_column(u'director_dashboardwidgets', 'height',
self.gf('django.db.models.fields.IntegerField')(default=360, max_length=3),
keep_default=False)
def backwards(self, orm):
# Adding field 'PageWidget.width'
db.add_column(u'director_pagewidget', 'width',
self.gf('django.db.models.fields.IntegerField')(default=6, max_length=2),
keep_default=False)
# Deleting field 'PageWidget.theme'
db.delete_column(u'director_pagewidget', 'theme')
# Deleting field 'PageWidget.data_json'
db.delete_column(u'director_pagewidget', 'data_json')
# Deleting field 'PageWidget.render_function'
db.delete_column(u'director_pagewidget', 'render_function')
# Deleting field 'DashboardWidgets.width'
db.delete_column(u'director_dashboardwidgets', 'width')
# Deleting field 'DashboardWidgets.height'
db.delete_column(u'director_dashboardwidgets', 'height')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'director.dashboardwidgets': {
'Meta': {'ordering': "['order']", 'object_name': 'DashboardWidgets'},
'dashboard': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['director.DirectorDashboard']"}),
'height': ('django.db.models.fields.IntegerField', [], {'default': '360', 'max_length': '3'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'order': ('django.db.models.fields.IntegerField', [], {}),
'widget': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['director.PageWidget']"}),
'width': ('django.db.models.fields.IntegerField', [], {'default': '6', 'max_length': '2'})
},
u'director.directordashboard': {
'Meta': {'object_name': 'DirectorDashboard'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'org': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'max_length': '250', 'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'page_widgets': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['director.PageWidget']", 'null': 'True', 'through': u"orm['director.DashboardWidgets']", 'blank': 'True'}),
'site_icon': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '1', 'max_length': '1'}),
'tags': ('django.db.models.fields.CharField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
'tracking_code': ('django.db.models.fields.CharField', [], {'max_length': '250', 'null': 'True', 'blank': 'True'}),
'type': ('django.db.models.fields.CharField', [], {'default': "'Portal'", 'max_length': '10'})
},
u'director.pagewidget': {
'Meta': {'ordering': "['name']", 'object_name': 'PageWidget'},
'data_json': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'iframe_url': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'iframe_url_if_local': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '200'}),
'render_function': ('django.db.models.fields.CharField', [], {'default': "'notesAndChildNotes'", 'max_length': '60'}),
'theme': ('django.db.models.fields.CharField', [], {'default': "'Thin'", 'max_length': '10'}),
'type': ('django.db.models.fields.CharField', [], {'default': "'Wiki'", 'max_length': '10'})
}
}
complete_apps = ['director']
|
{
"content_hash": "473c09c0cd7bc6da0596b311cfa3105e",
"timestamp": "",
"source": "github",
"line_count": 136,
"max_line_length": 228,
"avg_line_length": 66.04411764705883,
"alnum_prop": 0.5649075929637052,
"repo_name": "ngageoint/geoevents",
"id": "f00490e7e456024338a0e5bc6cfa19a3a5784fda",
"size": "9006",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "geoevents/director/migrations/0010_auto__del_field_pagewidget_width__add_field_pagewidget_theme__add_fiel.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "7006"
},
{
"name": "CSS",
"bytes": "169395"
},
{
"name": "JavaScript",
"bytes": "10629452"
},
{
"name": "Python",
"bytes": "1589774"
},
{
"name": "Shell",
"bytes": "4212"
}
],
"symlink_target": ""
}
|
from System.Diagnostics import *
from System.IO import *
import System
from System import TimeSpan, DateTime
from Deadline.Events import *
from Deadline.Scripting import *
from Deadline.Slaves import *
import sys
import os
import textwrap
def GetDeadlineEventListener():
return SlaveMonitor()
def CleanupDeadlineEventListener(eventListener):
eventListener.Cleanup()
class SlaveMonitor(DeadlineEventListener):
def __init__(self):
self.OnJobErrorCallback += self.OnJobError
def Cleanup(self):
del self.OnJobErrorCallback
def OnJobError(self, job, task, report):
# Get slavemonitor config entries
catch_nonzero = self.GetBooleanConfigEntry('CatchNonZero')
nonzero_action = self.GetConfigEntry('NonZeroAction')
use_error_threshold = self.GetBooleanConfigEntry('UseErrorThreshold')
error_threshold = self.GetIntegerConfigEntry('ErrorThreshold')
error_threshold_action = self.GetConfigEntry('ErrorThresholdAction')
message = report.ReportMessage
        is_nonzero_message = 'non-zero error code -1073740777' in message
        # Grab the slave name before it is used by the non-zero handler below
        slave = report.ReportSlaveName
        if catch_nonzero and is_nonzero_message:
            try:
                fn = getattr(self, nonzero_action)
                fn(job, task, slave)
            except Exception as e:
                log(e)
job_error_reports = self.get_slave_reports_since_startup(slave, job)
threshold_reached = len(job_error_reports) > error_threshold
if use_error_threshold and threshold_reached:
log('{} hit error threshold for job {}'.format(slave, job.JobName))
try:
fn = getattr(self, error_threshold_action)
fn(job, task, slave)
except Exception as e:
log(e)
def blacklist(self, job, task, slave):
log('Blacklisting {} for {}'.format(slave, job))
RepositoryUtils.AddSlavesToMachineLimitList(
job.JobId,
[slave]
)
def get_slave_reports_since_startup(self, slave, job):
slave_info = RepositoryUtils.GetSlaveInfo(slave, True)
slave_run_time = slave_info.SlaveRunningTime
slave_reports = RepositoryUtils.GetSlaveReports(slave)
error_reports = slave_reports.GetErrorReports()
job_error_reports = []
slave_start_time = DateTime.get_Now().AddSeconds(-slave_run_time)
for report in error_reports:
# Report time doesn't match local time zone
            # Workaround: add 85 minutes to the reported time
            # TODO: resolve this time offset properly
report_time = report.ReportDateTimeOf
if report.ReportJobName == job.JobName:
if DateTime.op_GreaterThan(slave_start_time, report_time):
continue
job_error_reports.append(report)
return job_error_reports
def relaunch_slave(self, job, task, slave):
log('Restarting {} for {}'.format(slave, job))
SlaveUtils.SendRemoteCommandNoWait(
task.TaskSlaveMachineName,
'ForceRelaunchSlave',
)
def restart_machine(self, job, task, slave):
log('Restarting {} for {}'.format(slave, job))
SlaveUtils.SendRemoteCommandNoWait(
task.TaskSlaveMachineName,
'OnLastTaskComplete RestartMachine',
)
def log(msg):
msg = textwrap.fill(
'SLAVEMONITOR: {}'.format(msg),
initial_indent='',
subsequent_indent=' '
)
ClientUtils.LogText(msg)
|
{
"content_hash": "8823f05872c1bfc5df6a40bc586aad3e",
"timestamp": "",
"source": "github",
"line_count": 115,
"max_line_length": 79,
"avg_line_length": 31.339130434782607,
"alnum_prop": 0.6351276359600444,
"repo_name": "danbradham/slavemonitor",
"id": "11f023040cbf8f459f7dbafdfdc9a8e37d75d4c4",
"size": "3604",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "slavemonitor.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "3604"
}
],
"symlink_target": ""
}
|
from lino_xl.lib.contacts.management.commands.garble_persons import *
|
{
"content_hash": "ab40e4b37c16c56baa602bb3b5fe0fae",
"timestamp": "",
"source": "github",
"line_count": 2,
"max_line_length": 69,
"avg_line_length": 35.5,
"alnum_prop": 0.8169014084507042,
"repo_name": "lsaffre/lino-faggio",
"id": "2bed9dbd3ceecf6002e567624ebb55502bc717a5",
"size": "71",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "lino_voga/lib/contacts/management/commands/garble_persons.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "HTML",
"bytes": "5646"
},
{
"name": "Python",
"bytes": "140555"
},
{
"name": "Shell",
"bytes": "111"
}
],
"symlink_target": ""
}
|
"""Internal-only module with immutable data structures.
Please, do not use it outside of Luigi codebase itself.
"""
from collections import OrderedDict
try:
    from collections.abc import Mapping  # Python 3.3+
except ImportError:  # Python 2 fallback
    from collections import Mapping
import operator
import functools
class FrozenOrderedDict(Mapping):
"""
It is an immutable wrapper around ordered dictionaries that implements the complete :py:class:`collections.Mapping`
interface. It can be used as a drop-in replacement for dictionaries where immutability and ordering are desired.
"""
def __init__(self, *args, **kwargs):
self.__dict = OrderedDict(*args, **kwargs)
self.__hash = None
def __getitem__(self, key):
return self.__dict[key]
def __iter__(self):
return iter(self.__dict)
def __len__(self):
return len(self.__dict)
def __repr__(self):
# We should use short representation for beautiful console output
return repr(dict(self.__dict))
def __hash__(self):
if self.__hash is None:
hashes = map(hash, self.items())
self.__hash = functools.reduce(operator.xor, hashes, 0)
return self.__hash
def get_wrapped(self):
return self.__dict
def recursively_freeze(value):
"""
Recursively walks ``Mapping``s and ``list``s and converts them to ``FrozenOrderedDict`` and ``tuples``, respectively.
"""
if isinstance(value, Mapping):
return FrozenOrderedDict(((k, recursively_freeze(v)) for k, v in value.items()))
elif isinstance(value, list) or isinstance(value, tuple):
return tuple(recursively_freeze(v) for v in value)
return value
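if __name__ == '__main__':
    # Minimal demonstration sketch (not part of the original module):
    # nested mappings and lists become hashable, immutable structures.
    frozen = recursively_freeze({'a': [1, 2], 'b': {'c': 3}})
    print(frozen['a'])  # (1, 2)
    print(frozen['b'])  # {'c': 3}
    # Structurally equal frozen mappings hash equal.
    print(hash(frozen) == hash(recursively_freeze({'a': [1, 2], 'b': {'c': 3}})))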
|
{
"content_hash": "05e77cfb2041f35a851997fec4dd0781",
"timestamp": "",
"source": "github",
"line_count": 54,
"max_line_length": 121,
"avg_line_length": 29.796296296296298,
"alnum_prop": 0.6476072094468615,
"repo_name": "adaitche/luigi",
"id": "a40c3d03803db088bb3bf6cb3fddf69278524064",
"size": "1609",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "luigi/freezing.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "5529"
},
{
"name": "HTML",
"bytes": "42801"
},
{
"name": "JavaScript",
"bytes": "174299"
},
{
"name": "Python",
"bytes": "2173870"
},
{
"name": "Shell",
"bytes": "2901"
}
],
"symlink_target": ""
}
|
from ginga import toolkit
tkname = toolkit.get_family()
if tkname == 'qt':
from ginga.qtw.Viewers import *
elif tkname == 'gtk':
from ginga.gtkw.Viewers import *
elif tkname == 'gtk3':
from ginga.gtk3w.Viewers import *
elif tkname == 'pg':
from ginga.web.pgw.Viewers import *
from ginga.table.TableView import TableViewGw
|
{
"content_hash": "80c12142121daa94a339b1d996235dad",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 45,
"avg_line_length": 21.4375,
"alnum_prop": 0.6967930029154519,
"repo_name": "stscieisenhamer/ginga",
"id": "349620514307be6d520ee4fd4182c09768e3243a",
"size": "343",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ginga/gw/Viewers.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "2151"
},
{
"name": "JavaScript",
"bytes": "82354"
},
{
"name": "Python",
"bytes": "2763201"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('cms_lab_publications', '0004_auto_20150624_1236'),
]
operations = [
migrations.AlterField(
model_name='publicationset',
name='publications',
field=models.ManyToManyField(blank=True, to='cms_lab_publications.Publication'),
preserve_default=True,
),
]
|
{
"content_hash": "95b77ca71cdf9801bb598650032db753",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 92,
"avg_line_length": 25.36842105263158,
"alnum_prop": 0.6265560165975104,
"repo_name": "mfcovington/djangocms-lab-publications",
"id": "18d141ef2fd083e7387c537afea89d753caa3e22",
"size": "506",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "cms_lab_publications/migrations/0005_auto_20151109_1640.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "1826"
},
{
"name": "HTML",
"bytes": "7642"
},
{
"name": "JavaScript",
"bytes": "4293"
},
{
"name": "Python",
"bytes": "34629"
}
],
"symlink_target": ""
}
|
import time
import json
from . import QueueStatReporter
from ..stratum_server import StratumClient
# Parameters: {"current block"'s key name,
# current timestamp,
#               new key name for "current block" (something like unproc_block_{block_hash})
solve_rotate_multichain = """
-- Get all the keys so we can find all the sharechains that contributed
local keys = redis.call('HKEYS', ARGV[1])
-- Set the end time of block solve. This also serves to guarantee the key is there...
redis.call('HSET', ARGV[1], 'solve_time', ARGV[2])
-- Rename to new home
redis.call('rename', ARGV[1], ARGV[3])
-- Initialize the new block key with a start time
redis.call('HSET', ARGV[1], 'start_time', ARGV[2])
-- Parse out and rotate all share chains. I'm sure this is terrible, no LUA skillz
local idx_map = {}
for key, val in pairs(keys) do
local t = {}
local i = 0
for w in string.gmatch(val, "%w+") do
t[i] = w
i = i + 1
end
if t[0] == "chain" and t[2] == "shares" then
local base = "chain_" .. t[1] .. "_slice"
local idx = redis.call('incr', base .. "_index")
redis.pcall('HSET', ARGV[1], "chain_" .. t[1] .. "_start_index", "" .. idx)
redis.pcall('renamenx', base, base .. "_" .. idx)
table.insert(idx_map, t[1] .. ":" .. idx)
end
end
return idx_map
"""
class RedisReporter(QueueStatReporter):
one_sec_stats = ['queued']
gl_methods = ['_queue_proc', '_report_one_min']
defaults = QueueStatReporter.defaults.copy()
defaults.update(dict(redis={}, chain=1))
def __init__(self, config):
self._configure(config)
super(RedisReporter, self).__init__()
# Import reporter type specific modules here as to not require them
# for using powerpool with other reporters
import redis
# A list of exceptions that would indicate that retrying a queue item
        # COULD EVENTUALLY work (i.e., bad connection because of server
        # maintenance). Errors that are likely to occur because of bad
# coding/edge cases should be let through and data discarded after a
# few attempts.
        self.queue_exceptions = (redis.exceptions.ConnectionError,
                                 redis.exceptions.InvalidResponse,
                                 redis.exceptions.TimeoutError)
self.redis = redis.Redis(**self.config['redis'])
self.solve_cmd = self.redis.register_script(solve_rotate_multichain)
@property
def status(self):
return dict(queue_size=self.queue.qsize())
def _queue_log_one_minute(self, address, worker, algo, stamp, typ, amount):
        # Include worker info if defined
        if worker:
            address += "." + worker
self.redis.hincrbyfloat(
"min_{}_{}_{}".format(StratumClient.share_type_strings[typ], algo, stamp),
address, amount)
def _queue_add_block(self, address, height, total_subsidy, fees, hex_bits,
hex_hash, currency, algo, merged=False, worker=None,
**kwargs):
block_key = 'current_block_{}_{}'.format(currency, algo)
new_block_key = "unproc_block_{}".format(hex_hash)
chain_indexes_serial = self.solve_cmd(keys=[], args=[block_key, time.time(), new_block_key])
chain_indexs = {}
for chain in chain_indexes_serial:
chain_id, last_index = chain.split(":")
chain_indexs["chain_{}_solve_index".format(chain_id)] = last_index
self.redis.hmset(new_block_key, dict(address=address,
worker=worker,
height=height,
total_subsidy=total_subsidy,
fees=fees,
hex_bits=hex_bits,
hash=hex_hash,
currency=currency,
algo=algo,
merged=int(bool(merged)),
**chain_indexs))
def _queue_log_share(self, address, shares, algo, currency, merged=False):
block_key = 'current_block_{}_{}'.format(currency, algo)
chain_key = 'chain_{}_shares'.format(self.config['chain'])
chain_slice = 'chain_{}_slice'.format(self.config['chain'])
user_shares = '{}:{}'.format(address, shares)
self.redis.hincrbyfloat(block_key, chain_key, shares)
self.redis.rpush(chain_slice, user_shares)
def log_share(self, client, diff, typ, params, job=None, header_hash=None, header=None,
**kwargs):
super(RedisReporter, self).log_share(
client, diff, typ, params, job=job, header_hash=header_hash,
header=header, **kwargs)
if typ != StratumClient.VALID_SHARE:
return
for currency in job.merged_data:
self.queue.put(("_queue_log_share", [], dict(address=client.address,
shares=diff,
algo=job.algo,
currency=currency,
merged=True)))
self.queue.put(("_queue_log_share", [], dict(address=client.address,
shares=diff,
algo=job.algo,
currency=job.currency,
merged=False)))
def _queue_agent_send(self, address, worker, typ, data, stamp):
if typ == "hashrate" or typ == "temp":
stamp = (stamp // 60) * 60
for did, val in enumerate(data):
self.redis.hset("{}_{}".format(typ, stamp),
"{}_{}_{}".format(address, worker, did),
val)
elif typ == "status":
# Set time so we know how fresh the data is
data['time'] = time.time()
# Remove the data in 1 day
self.redis.setex("status_{}_{}".format(address, worker),
json.dumps(data), 86400)
else:
self.logger.warn("Recieved unsupported ppagent type {}"
.format(typ))
def agent_send(self, *args, **kwargs):
self.queue.put(("_queue_agent_send", args, kwargs))
# Manual smoke test for the Lua rotation script, kept for reference:
# import redis
# redis = redis.Redis()
# solve_cmd = redis.register_script(solve_rotate_multichain)
# redis.hincrbyfloat("current_block_testing", "chain_1_shares", 12.5)
# print(solve_cmd(keys=[], args=["current_block_testing", time.time(),
#                                "unproc_block_testing"]))
# exit(0)
|
{
"content_hash": "335893770785c0c53c8cdea0d3a5e589",
"timestamp": "",
"source": "github",
"line_count": 156,
"max_line_length": 100,
"avg_line_length": 44.84615384615385,
"alnum_prop": 0.521726700971984,
"repo_name": "sigwo/powerpool",
"id": "c413fd9bdc12ea574763b03dcd6ae1288e4e704f",
"size": "6996",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "powerpool/reporters/redis_reporter.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "144070"
},
{
"name": "Shell",
"bytes": "581"
}
],
"symlink_target": ""
}
|
from oslo_utils import uuidutils
from nova import context
from nova import exception
from nova.objects import cell_mapping
from nova import test
from nova.tests import fixtures
SAMPLE_MAPPING = {'uuid': '',
'name': 'fake-cell',
'transport_url': 'rabbit:///',
'database_connection': 'mysql+pymysql:///'}
def create_mapping(**kwargs):
args = SAMPLE_MAPPING.copy()
if 'uuid' not in kwargs:
args['uuid'] = uuidutils.generate_uuid()
args.update(kwargs)
ctxt = context.RequestContext()
return cell_mapping.CellMapping._create_in_db(ctxt, args)
class CellMappingTestCase(test.NoDBTestCase):
USES_DB_SELF = True
def setUp(self):
super(CellMappingTestCase, self).setUp()
self.useFixture(fixtures.Database(database='api'))
self.context = context.RequestContext('fake-user', 'fake-project')
self.mapping_obj = cell_mapping.CellMapping()
def test_get_by_uuid(self):
mapping = create_mapping()
db_mapping = self.mapping_obj._get_by_uuid_from_db(self.context,
mapping['uuid'])
for key in self.mapping_obj.fields.keys():
self.assertEqual(db_mapping[key], mapping[key])
def test_get_by_uuid_not_found(self):
self.assertRaises(exception.CellMappingNotFound,
self.mapping_obj._get_by_uuid_from_db, self.context,
uuidutils.generate_uuid())
def test_save_in_db(self):
mapping = create_mapping()
self.mapping_obj._save_in_db(self.context, mapping['uuid'],
{'name': 'meow'})
db_mapping = self.mapping_obj._get_by_uuid_from_db(self.context,
mapping['uuid'])
self.assertNotEqual(db_mapping['name'], mapping['name'])
for key in [key for key in self.mapping_obj.fields.keys()
if key not in ['name', 'updated_at']]:
self.assertEqual(db_mapping[key], mapping[key])
def test_destroy_in_db(self):
mapping = create_mapping()
self.mapping_obj._get_by_uuid_from_db(self.context, mapping['uuid'])
self.mapping_obj._destroy_in_db(self.context, mapping['uuid'])
self.assertRaises(exception.CellMappingNotFound,
self.mapping_obj._get_by_uuid_from_db, self.context,
mapping['uuid'])
def test_destroy_in_db_not_found(self):
self.assertRaises(exception.CellMappingNotFound,
self.mapping_obj._destroy_in_db, self.context,
uuidutils.generate_uuid())
class CellMappingListTestCase(test.NoDBTestCase):
USES_DB_SELF = True
def setUp(self):
super(CellMappingListTestCase, self).setUp()
self.useFixture(fixtures.Database(database='api'))
def test_get_all(self):
mappings = {}
mapping = create_mapping()
mappings[mapping['uuid']] = mapping
mapping = create_mapping()
mappings[mapping['uuid']] = mapping
ctxt = context.RequestContext()
db_mappings = cell_mapping.CellMappingList._get_all_from_db(ctxt)
for db_mapping in db_mappings:
mapping = mappings[db_mapping.uuid]
for key in cell_mapping.CellMapping.fields.keys():
self.assertEqual(db_mapping[key], mapping[key])
|
{
"content_hash": "6fce0c4ce52ff2d23db6a935c4f04161",
"timestamp": "",
"source": "github",
"line_count": 90,
"max_line_length": 76,
"avg_line_length": 36.9,
"alnum_prop": 0.6214995483288166,
"repo_name": "rajalokan/nova",
"id": "ccb4928db42a965df9cf7cb4a927c5f0f55e0e7b",
"size": "3894",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "nova/tests/functional/db/test_cell_mapping.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "601"
},
{
"name": "PHP",
"bytes": "4503"
},
{
"name": "Python",
"bytes": "19100322"
},
{
"name": "Shell",
"bytes": "26793"
},
{
"name": "Smarty",
"bytes": "299237"
}
],
"symlink_target": ""
}
|
import inspect
import random
import time
from math import sqrt
def monte_carlo_average(function, lowerLimit, upperLimit, acceptableError,\
maximumIterations=100000):
"""
title::
monte_carlo_average
description::
This method will perform an integration using the Monte Carlo "Average"
method. If the method is given a list or tuple it is assumed that the
values are the y-values of the function and are between the desired
bounds to integrate over. If the method is given a function it will
iterate until it obtains the area under the curve to within a specified
acceptable error.
attributes::
function
(function, list or tuple) Function for which the area will be found
under. If list or tuple, it is assumed the values are the y-values
of the function and are between the desired bounds to integrate
over.
lowerLimit
(int or float) The lower (left-hand) boundary of the region beneath
the function for which the area is to be found. Necessary but
ignored if the function attribute is a list or tuple
upperLimit
            (int or float) The upper (right-hand) boundary of the region
            beneath the function for which the area is to be found.
            Necessary but ignored if the function attribute is a list or
            tuple
acceptableError
(float) The acceptable error for the approximation of the area.
Necessary but ignored if the function attribute is a list or tuple
maximumIterations
(int [optional]) The maximum allowable number of iterations that
the method may execute before raising a RuntimeError. Default
value of 100000. Ignored if function attribute is a list or tuple
returns::
area
(float) The area under the curve to within a specified acceptable
error. If function attribute is a list or tuple, area accuracy
will be dependent on number of elements in the function and will
not be within specified error.
author::
Alex Perkins
copyright::
Copyright (C) 2016, Rochester Institute of Technology
version::
1.0.0
"""
# Check if function attribute is a list or tuple
if isinstance(function, list) or isinstance(function, tuple):
# Get number of elements in function attribute. Calculate average
# of all elements.
fSquaredBar = 0
numElements = len(function)
fBar = sum(function)/numElements
        # Square the elements in the function attribute and sum them together
for i in function:
fSquaredBar += i**2
# Calculate average of the squares. Then calculate epsilon
fSquaredBar /= numElements
epsilon = (upperLimit - lowerLimit)*sqrt((fSquaredBar - fBar**2)\
/numElements)
# Calculate area and print epsilon. Comment out if unnecessary to
# print out.
area = (upperLimit - lowerLimit)*fBar
print('Error: {0}'.format(epsilon))
# Check if function attribute is a function
elif inspect.isfunction(function):
# Create small table for display purposes on command line.
# Comment out if unnecessary.
print('\nArea\t\tEpsilon\t\tIterations')
# Define variables. Epsilon is set to infinity in order to begin
# while loop.
fSum = 0
fSquaredSum = 0
iterations = 0
epsilon = float('inf')
        # Start integration. Once epsilon is less than the acceptable error
        # the while loop stops. Epsilon starts at infinity and the
        # "iterations <= 1" guard forces at least two passes.
while epsilon >= acceptableError or iterations <= 1:
# Create a uniformly random number between the lower and upper
# limits of the function.
randomNumber = random.uniform(lowerLimit, upperLimit)
# If the number of iterations reaches the maximum allowable
# iterations a RuntimeError is raised.
if iterations == maximumIterations:
print()
raise RuntimeError('Reached maximum number of allowed '\
'iterations: {0}'.format(maximumIterations))
else:
iterations += 1
# Evaluates the function with the random number and adds
# to the total sum. fSquaredSum sums the squares
fSum += function(randomNumber)
fSquaredSum += function(randomNumber)**2
# Calculate an average of each total sum
fBar = fSum/iterations
fSquaredBar = fSquaredSum/iterations
# Calculate epsilon
epsilon = (upperLimit - lowerLimit)*\
sqrt((fSquaredBar - fBar**2) / iterations)
# Calculate area under curve
area = (upperLimit - lowerLimit)*fBar
# Prints out area under curve, epsilon, and iterations.
# Continuously updates during integration. Comment out if
# unnecessary.
print('{0:.6f}\t{1:.6f}\t{2}'\
.format(area, epsilon, iterations),end='\r')
else:
# Raise TypeError if the function attribute is not a function, list
# or tuple
msg = 'Provided attribute "function" is not a function, list or tuple'
raise TypeError(msg)
# Return area under curve. Print is added for command line formatting
# purposes. Comment out if unnecessary.
print()
return area
if __name__ == '__main__':
import math
import numerical.integrate
import time
def f(x):
return math.sqrt(x)
lowerLimit = 0.0
upperLimit = 1.0
for acceptableError in (0.1, 0.01, 0.001, 0.0001):
startTime = time.time()
area = numerical.integrate.monte_carlo_average(f,\
lowerLimit,\
upperLimit,\
acceptableError)
print('Elapsed time = {0:.6f} [s]'.format(time.time() - startTime))
print('With an acceptable error of {0:.10f}'.format(acceptableError))
print('Area of f(x)=sqrt(x) over [{0}, {1}] = {2}'.format(lowerLimit,\
upperLimit,\
area))
|
{
"content_hash": "b1037768c21f038f4cb3f41a4fa84c7c",
"timestamp": "",
"source": "github",
"line_count": 174,
"max_line_length": 79,
"avg_line_length": 38.49425287356322,
"alnum_prop": 0.5764407285756943,
"repo_name": "aap5869/RIT",
"id": "39c3e32ebcb6a7ab65f918a04046f0aa079e4225",
"size": "6698",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "computing_and_control/numerical/integrate/monte_carlo_average.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Matlab",
"bytes": "2080"
},
{
"name": "Python",
"bytes": "59326"
}
],
"symlink_target": ""
}
|
import glob
import os

import numpy as np
from frb.cfx import CFX
from frb.utils import find_file
from frb.raw_data import M5, dspec_cat
from frb.search_candidates import Searcher
from frb.dedispersion import de_disperse_cumsum
from frb.search import search_candidates, create_ellipses
from frb.queries import query_frb, connect_to_db
# Setup
exp_code = 'raks12er'
cfx_file = '/home/ilya/code/frb/frb/RADIOASTRON_RAKS12ER_L_20151105T130000_ASC_V1.cfx'
data_dir = '/mnt/frb_data/raw_data/2015_309_raks12er'
dspec_params = {'nchan': 64, 'dt': 1, 'offst': 0, 'dur': 10, 'outfile': None}
# Split an M5-file into [sec] intervals
split_duration = 0.5
cobj = CFX(cfx_file)
cfx_data = cobj.parse_cfx(exp_code)
if cobj.freq == 'K':
print("Skipping K-band CFX file: {}".format(os.path.basename(cfx_file)))
print("NOTE: You can delete following files from data path:")
print(cfx_data)
for fname, params in cfx_data.items():
fname = fname.split(".")[0]
    m5file = glob.glob(os.path.join(os.path.join(data_dir, params[1].lower()),
                                    fname + "*"))[0]
m5file_fmt = params[2] # Raw data format
cfx_fmt = params[-1] # Rec configuration
m5 = M5(m5file, m5file_fmt)
offst = 0
dspec_params.update({'dur': split_duration})
while offst*32e6 < m5.size:
dspec_params.update({'offst':offst})
# print dspec_params
ds = m5.create_dspec(**dspec_params)
# NOTE: all 4 channels are stacked forming dsarr:
dsarr = dspec_cat(os.path.basename(ds['Dspec_file']), cfx_fmt)
metadata = ds
metadata['Raw_data_file'] = fname
metadata['Exp_data'] = params
print "BRV SEARCHING..." # search brv in array here
# TODO: save search results, delete data, ...
offst = offst + split_duration
antennas = list()
antennas = ['AR', 'EF', 'RA']
# Step of de-dispersion
d_dm = 25.
for antenna in antennas:
    # NOTE: ``t`` and ``frame`` below are left undefined in the original
    # script; presumably they come from the raw-data splitting step above.
    meta_data = {'antenna': antenna, 'freq': 'L', 'band': 'U', 'pol': 'R',
                 'exp_code': 'raks00', 'nu_max': 1684., 't_0': t,
                 'd_nu': 16./256., 'd_t': 0.001}
# Values of DM to de-disperse
dm_grid = np.arange(0., 1000., d_dm)
# Initialize searcher class
searcher = Searcher(dsp=frame.values, meta_data=meta_data)
# Run search for FRB with some parameters of de-dispersion, pre-processing,
# searching algorithms
candidates = searcher.run(de_disp_func=de_disperse_cumsum,
search_func=search_candidates,
preprocess_func=create_ellipses,
de_disp_args=[dm_grid],
search_kwargs={'n_d_x': 5., 'n_d_y': 15.,
'd_dm': d_dm},
preprocess_kwargs={'disk_size': 3,
'threshold_perc': 98.,
'statistic': 'mean'},
db_file='/home/ilya/code/akutkin/frb/frb/frb.db')
print "Found {} pulses".format(len(candidates))
for candidate in candidates:
print candidate
session = connect_to_db("/home/ilya/code/akutkin/frb/frb/frb.db")
# Query DB
frb_list = query_frb(session, exp_code, d_dm=100., d_t=0.1)
for frb in frb_list:
    print(frb)
|
{
"content_hash": "4308dadd50428f3f9d4ab78787f61d24",
"timestamp": "",
"source": "github",
"line_count": 84,
"max_line_length": 86,
"avg_line_length": 39.726190476190474,
"alnum_prop": 0.5783637998201978,
"repo_name": "ipashchenko/frb",
"id": "9df40e57e0774fefd4377f5d03aad395a958fdd7",
"size": "3337",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "examples/process_one_experiment.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "10799"
},
{
"name": "Makefile",
"bytes": "211"
},
{
"name": "Python",
"bytes": "102317"
},
{
"name": "Shell",
"bytes": "140"
}
],
"symlink_target": ""
}
|
"""
Tests For ZoneManager
"""
import datetime
import mox
import novaclient
from nova import context
from nova import db
from nova import flags
from nova import service
from nova import test
from nova import rpc
from nova import utils
from nova.auth import manager as auth_manager
from nova.scheduler import zone_manager
FLAGS = flags.FLAGS
class FakeZone:
"""Represents a fake zone from the db"""
def __init__(self, *args, **kwargs):
for k, v in kwargs.iteritems():
setattr(self, k, v)
def exploding_novaclient(zone):
"""Used when we want to simulate a novaclient call failing."""
raise Exception("kaboom")
class ZoneManagerTestCase(test.TestCase):
"""Test case for zone manager"""
def test_ping(self):
zm = zone_manager.ZoneManager()
self.mox.StubOutWithMock(zm, '_refresh_from_db')
self.mox.StubOutWithMock(zm, '_poll_zones')
zm._refresh_from_db(mox.IgnoreArg())
zm._poll_zones(mox.IgnoreArg())
self.mox.ReplayAll()
zm.ping(None)
self.mox.VerifyAll()
def test_refresh_from_db_new(self):
zm = zone_manager.ZoneManager()
self.mox.StubOutWithMock(db, 'zone_get_all')
db.zone_get_all(mox.IgnoreArg()).AndReturn([
FakeZone(id=1, api_url='http://foo.com', username='user1',
password='pass1'),
])
self.assertEquals(len(zm.zone_states), 0)
self.mox.ReplayAll()
zm._refresh_from_db(None)
self.mox.VerifyAll()
self.assertEquals(len(zm.zone_states), 1)
self.assertEquals(zm.zone_states[1].username, 'user1')
def test_service_capabilities(self):
zm = zone_manager.ZoneManager()
caps = zm.get_zone_capabilities(None)
self.assertEquals(caps, {})
zm.update_service_capabilities("svc1", "host1", dict(a=1, b=2))
caps = zm.get_zone_capabilities(None)
self.assertEquals(caps, dict(svc1_a=(1, 1), svc1_b=(2, 2)))
zm.update_service_capabilities("svc1", "host1", dict(a=2, b=3))
caps = zm.get_zone_capabilities(None)
self.assertEquals(caps, dict(svc1_a=(2, 2), svc1_b=(3, 3)))
zm.update_service_capabilities("svc1", "host2", dict(a=20, b=30))
caps = zm.get_zone_capabilities(None)
self.assertEquals(caps, dict(svc1_a=(2, 20), svc1_b=(3, 30)))
zm.update_service_capabilities("svc10", "host1", dict(a=99, b=99))
caps = zm.get_zone_capabilities(None)
self.assertEquals(caps, dict(svc1_a=(2, 20), svc1_b=(3, 30),
svc10_a=(99, 99), svc10_b=(99, 99)))
zm.update_service_capabilities("svc1", "host3", dict(c=5))
caps = zm.get_zone_capabilities(None)
self.assertEquals(caps, dict(svc1_a=(2, 20), svc1_b=(3, 30),
svc1_c=(5, 5), svc10_a=(99, 99),
svc10_b=(99, 99)))
def test_refresh_from_db_replace_existing(self):
zm = zone_manager.ZoneManager()
zone_state = zone_manager.ZoneState()
zone_state.update_credentials(FakeZone(id=1, api_url='http://foo.com',
username='user1', password='pass1'))
zm.zone_states[1] = zone_state
self.mox.StubOutWithMock(db, 'zone_get_all')
db.zone_get_all(mox.IgnoreArg()).AndReturn([
FakeZone(id=1, api_url='http://foo.com', username='user2',
password='pass2'),
])
self.assertEquals(len(zm.zone_states), 1)
self.mox.ReplayAll()
zm._refresh_from_db(None)
self.mox.VerifyAll()
self.assertEquals(len(zm.zone_states), 1)
self.assertEquals(zm.zone_states[1].username, 'user2')
def test_refresh_from_db_missing(self):
zm = zone_manager.ZoneManager()
zone_state = zone_manager.ZoneState()
zone_state.update_credentials(FakeZone(id=1, api_url='http://foo.com',
username='user1', password='pass1'))
zm.zone_states[1] = zone_state
self.mox.StubOutWithMock(db, 'zone_get_all')
db.zone_get_all(mox.IgnoreArg()).AndReturn([])
self.assertEquals(len(zm.zone_states), 1)
self.mox.ReplayAll()
zm._refresh_from_db(None)
self.mox.VerifyAll()
self.assertEquals(len(zm.zone_states), 0)
def test_refresh_from_db_add_and_delete(self):
zm = zone_manager.ZoneManager()
zone_state = zone_manager.ZoneState()
zone_state.update_credentials(FakeZone(id=1, api_url='http://foo.com',
username='user1', password='pass1'))
zm.zone_states[1] = zone_state
self.mox.StubOutWithMock(db, 'zone_get_all')
db.zone_get_all(mox.IgnoreArg()).AndReturn([
FakeZone(id=2, api_url='http://foo.com', username='user2',
password='pass2'),
])
self.assertEquals(len(zm.zone_states), 1)
self.mox.ReplayAll()
zm._refresh_from_db(None)
self.mox.VerifyAll()
self.assertEquals(len(zm.zone_states), 1)
self.assertEquals(zm.zone_states[2].username, 'user2')
def test_poll_zone(self):
self.mox.StubOutWithMock(zone_manager, '_call_novaclient')
zone_manager._call_novaclient(mox.IgnoreArg()).AndReturn(
dict(name='zohan', capabilities='hairdresser'))
zone_state = zone_manager.ZoneState()
zone_state.update_credentials(FakeZone(id=2,
api_url='http://foo.com', username='user2',
password='pass2'))
zone_state.attempt = 1
self.mox.ReplayAll()
zone_manager._poll_zone(zone_state)
self.mox.VerifyAll()
self.assertEquals(zone_state.attempt, 0)
self.assertEquals(zone_state.name, 'zohan')
def test_poll_zone_fails(self):
self.stubs.Set(zone_manager, "_call_novaclient", exploding_novaclient)
zone_state = zone_manager.ZoneState()
zone_state.update_credentials(FakeZone(id=2,
api_url='http://foo.com', username='user2',
password='pass2'))
zone_state.attempt = FLAGS.zone_failures_to_offline - 1
self.mox.ReplayAll()
zone_manager._poll_zone(zone_state)
self.mox.VerifyAll()
self.assertEquals(zone_state.attempt, 3)
self.assertFalse(zone_state.is_active)
self.assertEquals(zone_state.name, None)
|
{
"content_hash": "29e473c80d3032c9e7fcd9a0d863b9f5",
"timestamp": "",
"source": "github",
"line_count": 186,
"max_line_length": 78,
"avg_line_length": 34.95161290322581,
"alnum_prop": 0.5936009844639286,
"repo_name": "termie/nova-migration-demo",
"id": "e132809dc30e5294d8a9a812a6ce1f6ba2f28407",
"size": "7162",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "nova/tests/test_zones.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "47238"
},
{
"name": "Python",
"bytes": "2431410"
},
{
"name": "Shell",
"bytes": "31459"
}
],
"symlink_target": ""
}
|
"""Pull a sandwich run's output directory's metrics from traces into a CSV.
python pull_sandwich_metrics.py -h
"""
import collections
import logging
import os
import shutil
import sys
import tempfile
_SRC_DIR = os.path.abspath(os.path.join(
os.path.dirname(__file__), '..', '..', '..'))
sys.path.append(os.path.join(_SRC_DIR, 'tools', 'perf'))
from chrome_telemetry_build import chromium_config
sys.path.append(chromium_config.GetTelemetryDir())
from telemetry.internal.image_processing import video
from telemetry.util import image_util
from telemetry.util import rgba_color
import loading_trace as loading_trace_module
import tracing
CATEGORIES = ['blink.user_timing', 'disabled-by-default-memory-infra']
CSV_FIELD_NAMES = [
'id',
'url',
'total_load',
'onload',
'browser_malloc_avg',
'browser_malloc_max',
'speed_index']
_TRACKED_EVENT_NAMES = set(['requestStart', 'loadEventStart', 'loadEventEnd'])
# Points of a completeness record.
#
# Members:
# |time| is in milliseconds,
# |frame_completeness| value representing how complete the frame is at a given
# |time|. Caution: this completeness might be negative.
CompletenessPoint = collections.namedtuple('CompletenessPoint',
('time', 'frame_completeness'))
def _GetBrowserPID(tracing_track):
"""Get the browser PID from a trace.
Args:
tracing_track: The tracing.TracingTrack.
Returns:
The browser's PID as an integer.
"""
for event in tracing_track.GetEvents():
if event.category != '__metadata' or event.name != 'process_name':
continue
if event.args['name'] == 'Browser':
return event.pid
raise ValueError('couldn\'t find browser\'s PID')
def _GetBrowserDumpEvents(tracing_track):
"""Get the browser memory dump events from a tracing track.
Args:
tracing_track: The tracing.TracingTrack.
Returns:
List of memory dump events.
"""
browser_pid = _GetBrowserPID(tracing_track)
browser_dumps_events = []
for event in tracing_track.GetEvents():
if event.category != 'disabled-by-default-memory-infra':
continue
if event.type != 'v' or event.name != 'periodic_interval':
continue
# Ignore dump events for processes other than the browser process
if event.pid != browser_pid:
continue
browser_dumps_events.append(event)
if len(browser_dumps_events) == 0:
raise ValueError('No browser dump events found.')
return browser_dumps_events
def _GetWebPageTrackedEvents(tracing_track):
"""Get the web page's tracked events from a tracing track.
Args:
tracing_track: The tracing.TracingTrack.
Returns:
Dictionary all tracked events.
"""
main_frame = None
tracked_events = {}
for event in tracing_track.GetEvents():
if event.category != 'blink.user_timing':
continue
event_name = event.name
# Ignore events until about:blank's unloadEventEnd that give the main
# frame id.
if not main_frame:
if event_name == 'unloadEventEnd':
main_frame = event.args['frame']
logging.info('found about:blank\'s event \'unloadEventEnd\'')
continue
    # Ignore sub-frame events. requestStart doesn't have the frame set, but
    # that is fine since we track the first one after about:blank's
    # unloadEventEnd.
if 'frame' in event.args and event.args['frame'] != main_frame:
continue
if event_name in _TRACKED_EVENT_NAMES and event_name not in tracked_events:
logging.info('found url\'s event \'%s\'' % event_name)
tracked_events[event_name] = event
assert len(tracked_events) == len(_TRACKED_EVENT_NAMES)
return tracked_events
def _PullMetricsFromLoadingTrace(loading_trace):
"""Pulls all the metrics from a given trace.
Args:
loading_trace: loading_trace_module.LoadingTrace.
Returns:
Dictionary with all CSV_FIELD_NAMES's field set (except the 'id').
"""
browser_dump_events = _GetBrowserDumpEvents(loading_trace.tracing_track)
web_page_tracked_events = _GetWebPageTrackedEvents(
loading_trace.tracing_track)
browser_malloc_sum = 0
browser_malloc_max = 0
for dump_event in browser_dump_events:
attr = dump_event.args['dumps']['allocators']['malloc']['attrs']['size']
assert attr['units'] == 'bytes'
size = int(attr['value'], 16)
browser_malloc_sum += size
browser_malloc_max = max(browser_malloc_max, size)
return {
'total_load': (web_page_tracked_events['loadEventEnd'].start_msec -
web_page_tracked_events['requestStart'].start_msec),
'onload': (web_page_tracked_events['loadEventEnd'].start_msec -
web_page_tracked_events['loadEventStart'].start_msec),
'browser_malloc_avg': browser_malloc_sum / float(len(browser_dump_events)),
'browser_malloc_max': browser_malloc_max
}
def _ExtractCompletenessRecordFromVideo(video_path):
"""Extracts the completeness record from a video.
The video must start with a filled rectangle of orange (RGB: 222, 100, 13), to
give the view-port size/location from where to compute the completeness.
Args:
video_path: Path of the video to extract the completeness list from.
Returns:
list(CompletenessPoint)
"""
video_file = tempfile.NamedTemporaryFile()
shutil.copy(video_path, video_file.name)
video_capture = video.Video(video_file)
histograms = [
(time, image_util.GetColorHistogram(
image, ignore_color=rgba_color.WHITE, tolerance=8))
for time, image in video_capture.GetVideoFrameIter()
]
start_histogram = histograms[1][1]
final_histogram = histograms[-1][1]
total_distance = start_histogram.Distance(final_histogram)
def FrameProgress(histogram):
if total_distance == 0:
if histogram.Distance(final_histogram) == 0:
return 1.0
else:
return 0.0
return 1 - histogram.Distance(final_histogram) / total_distance
return [(time, FrameProgress(hist)) for time, hist in histograms]
def ComputeSpeedIndex(completeness_record):
"""Computes the speed-index from a completeness record.
Args:
completeness_record: list(CompletenessPoint)
Returns:
Speed-index value.
"""
speed_index = 0.0
  last_time = completeness_record[0][0]
  last_completeness = completeness_record[0][1]
  for time, completeness in completeness_record:
    if time < last_time:
      raise ValueError('Completeness record must be sorted by timestamps.')
    elapsed = time - last_time
    speed_index += elapsed * (1.0 - last_completeness)
    last_time = time
    last_completeness = completeness
return speed_index
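# Hedged usage sketch (not in the original module): speed-index of a
# synthetic completeness record; times are in milliseconds.
#
# record = [(0.0, 0.0), (100.0, 0.5), (300.0, 1.0)]
# print(ComputeSpeedIndex(record))  # 100*(1-0.0) + 200*(1-0.5) = 200.0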
def PullMetricsFromOutputDirectory(output_directory_path):
"""Pulls all the metrics from all the traces of a sandwich run directory.
Args:
output_directory_path: The sandwich run's output directory to pull the
metrics from.
Returns:
List of dictionaries with all CSV_FIELD_NAMES's field set.
"""
assert os.path.isdir(output_directory_path)
metrics = []
for node_name in os.listdir(output_directory_path):
if not os.path.isdir(os.path.join(output_directory_path, node_name)):
continue
try:
page_id = int(node_name)
except ValueError:
continue
run_path = os.path.join(output_directory_path, node_name)
trace_path = os.path.join(run_path, 'trace.json')
if not os.path.isfile(trace_path):
continue
logging.info('processing \'%s\'' % trace_path)
loading_trace = loading_trace_module.LoadingTrace.FromJsonFile(trace_path)
row_metrics = {key: 'unavailable' for key in CSV_FIELD_NAMES}
row_metrics.update(_PullMetricsFromLoadingTrace(loading_trace))
row_metrics['id'] = page_id
row_metrics['url'] = loading_trace.url
video_path = os.path.join(run_path, 'video.mp4')
if os.path.isfile(video_path):
logging.info('processing \'%s\'' % video_path)
completeness_record = _ExtractCompletenessRecordFromVideo(video_path)
row_metrics['speed_index'] = ComputeSpeedIndex(completeness_record)
metrics.append(row_metrics)
assert len(metrics) > 0, ('Looks like \'{}\' was not a sandwich ' +
'run directory.').format(output_directory_path)
return metrics
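# Hedged sketch (not in the original module): dumping the pulled metrics
# to a CSV using the field names declared at the top of this file.
#
# import csv
# rows = PullMetricsFromOutputDirectory('sandwich_run_output')
# with open('metrics.csv', 'w') as csv_file:
#   writer = csv.DictWriter(csv_file, fieldnames=CSV_FIELD_NAMES)
#   writer.writeheader()
#   writer.writerows(rows)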
|
{
"content_hash": "57af849e59f99c656ca757a995cc064d",
"timestamp": "",
"source": "github",
"line_count": 254,
"max_line_length": 80,
"avg_line_length": 32.09448818897638,
"alnum_prop": 0.6935721295387635,
"repo_name": "junhuac/MQUIC",
"id": "e30232c05ab95282c62bb4fd2950de8696e8900d",
"size": "8315",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/tools/android/loading/sandwich_metrics.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "ApacheConf",
"bytes": "25707"
},
{
"name": "Assembly",
"bytes": "5386"
},
{
"name": "Batchfile",
"bytes": "42909"
},
{
"name": "C",
"bytes": "1168925"
},
{
"name": "C#",
"bytes": "81308"
},
{
"name": "C++",
"bytes": "43919800"
},
{
"name": "CMake",
"bytes": "46379"
},
{
"name": "CSS",
"bytes": "19668"
},
{
"name": "Emacs Lisp",
"bytes": "32613"
},
{
"name": "Go",
"bytes": "7247"
},
{
"name": "Groff",
"bytes": "127224"
},
{
"name": "HTML",
"bytes": "2548385"
},
{
"name": "Java",
"bytes": "1332462"
},
{
"name": "JavaScript",
"bytes": "851006"
},
{
"name": "M4",
"bytes": "29823"
},
{
"name": "Makefile",
"bytes": "459525"
},
{
"name": "Objective-C",
"bytes": "120158"
},
{
"name": "Objective-C++",
"bytes": "330017"
},
{
"name": "PHP",
"bytes": "11283"
},
{
"name": "Protocol Buffer",
"bytes": "2991"
},
{
"name": "Python",
"bytes": "16872234"
},
{
"name": "R",
"bytes": "1842"
},
{
"name": "Ruby",
"bytes": "937"
},
{
"name": "Shell",
"bytes": "764509"
},
{
"name": "Swift",
"bytes": "116"
},
{
"name": "VimL",
"bytes": "12288"
},
{
"name": "nesC",
"bytes": "14779"
}
],
"symlink_target": ""
}
|
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('teams', '0001_initial'),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('nucleus', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='teammemberhistory',
name='team',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='teams.Team'),
),
migrations.AddField(
model_name='teammember',
name='player',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='teammember',
name='position',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='nucleus.Position'),
),
migrations.AddField(
model_name='teammember',
name='team',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='teams.Team'),
),
migrations.AddField(
model_name='emailrecord',
name='to',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
),
]
|
{
"content_hash": "709780274731eb08d0c244eb7e8a0b46",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 128,
"avg_line_length": 34.095238095238095,
"alnum_prop": 0.6068435754189944,
"repo_name": "prattl/wepickheroes",
"id": "76b88e7b318dd113b6789a32fa32a333046d02ff",
"size": "1479",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "api/nucleus/migrations/0002_auto_20180102_0307.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "2768"
},
{
"name": "JavaScript",
"bytes": "16586"
},
{
"name": "Python",
"bytes": "11233"
},
{
"name": "Shell",
"bytes": "4079"
}
],
"symlink_target": ""
}
|
""" Pretty printers for sparse matrix classes
"""
from .util import stripType, templateParams, strToBool
class CompressedMatrixPrinter:
"""Print blaze::StaticMatrix"""
def __init__(self, val):
"""Extract all the necessary information"""
self.type = stripType(val.type)
template_params = templateParams(self.type)
self.rows = val['m_']
self.columns = val['n_']
self.begin = val['begin_']
self.end = val['end_']
self.columnMajor = strToBool(template_params[1])
self.elementType = self.type.template_argument(0)
def children(self):
"""Enumerate child items"""
yield ('rows', self.rows)
yield ('columns', self.columns)
# Enumerate non-zero elements and their indices
for i in range(self.columns if self.columnMajor else self.rows):
begin = self.begin[i]
end = self.end[i]
it = begin
while it < end:
j = it.dereference()['index_']
row = j if self.columnMajor else i
column = i if self.columnMajor else j
yield ('[{0}, {1}]'.format(row, column), it.dereference()['value_'])
it += 1
def storageOrder(self):
return 'columnMajor' if self.columnMajor else 'rowMajor'
def to_string(self):
return "CompressedMatrix<{0}, {1}>".format(self.elementType, self.storageOrder())
|
{
"content_hash": "19c8fa8064ea9119cdfd829c05cdb9f8",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 83,
"avg_line_length": 25.632653061224488,
"alnum_prop": 0.6687898089171974,
"repo_name": "camillescott/boink",
"id": "10f3c4dd1553f27502371a5a667e6cd55b0c2c9d",
"size": "1256",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "include/goetia/sketches/sketch/vec/blaze/tools/gdb/blaze_pretty_printers/sparse_matrix.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "100250"
},
{
"name": "C++",
"bytes": "1054510"
},
{
"name": "CMake",
"bytes": "302273"
},
{
"name": "Jupyter Notebook",
"bytes": "17489756"
},
{
"name": "Python",
"bytes": "267582"
},
{
"name": "Shell",
"bytes": "98"
}
],
"symlink_target": ""
}
|
from datadog.api.base import ActionAPIResource
class User(ActionAPIResource):
"""
A wrapper around User HTTP API.
"""
@classmethod
def invite(cls, emails):
"""
        Send an invite to join Datadog to each of the email addresses in the
        *emails* list. If *emails* is a string, it will be wrapped in a list and
        sent. Returns a list of email addresses for which an email was sent.
        :param emails: email addresses to invite to join Datadog
:type emails: string list
:returns: JSON response from HTTP request
"""
if not isinstance(emails, list):
emails = [emails]
body = {
'emails': emails,
}
return super(User, cls)._trigger_action('POST', '/invite_users', **body)
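# A minimal usage sketch (assumptions: datadog.initialize has been called with
# valid API and application keys; the addresses are illustrative):
#
#   from datadog import initialize
#   from datadog.api.users import User
#
#   initialize(api_key='<API_KEY>', app_key='<APP_KEY>')
#   User.invite('dev@example.com')                       # wrapped into a list
#   User.invite(['dev@example.com', 'ops@example.com'])  # several at once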
|
{
"content_hash": "068fa276daf737836213d16e2b18b8c8",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 80,
"avg_line_length": 29.59259259259259,
"alnum_prop": 0.6070087609511889,
"repo_name": "rogst/datadogpy",
"id": "d22f2d52a223ab2bef9c07aa4a8313ffecf45fbf",
"size": "799",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "datadog/api/users.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "259858"
},
{
"name": "Ruby",
"bytes": "333"
}
],
"symlink_target": ""
}
|
from django.db import models
from django.utils.translation import ugettext as _
from modelcluster.fields import ParentalKey
from wagtail.core.models import Page, Orderable
from wagtail.core.fields import RichTextField
from wagtail.images.models import Image
from wagtail.admin.edit_handlers import FieldPanel, MultiFieldPanel, InlinePanel
from wagtail.images.edit_handlers import ImageChooserPanel
from core.models import BaseModel
class PortfolioPage(BaseModel):
content_panels = Page.content_panels + [
MultiFieldPanel(
[
FieldPanel("section_title"),
FieldPanel("section_subtitle"),
InlinePanel("projects", label=_("projects")),
],
heading=_("Works"),
),
]
promote_panels = Page.promote_panels + [
FieldPanel("linked_data"),
]
class Project(Orderable):
page = ParentalKey(PortfolioPage, related_name="projects")
name = models.CharField(max_length=150)
category = models.CharField(max_length=100, default="", blank=True)
description = RichTextField()
image = models.ForeignKey(Image, on_delete=models.CASCADE, related_name="+")
link = models.URLField(help_text=_("Project link"), null=True, blank=True)
    # Make the project visible on the homepage
show_in_home = models.BooleanField(default=False)
panels = [
FieldPanel("name"),
ImageChooserPanel("image"),
FieldPanel("link"),
FieldPanel("category"),
FieldPanel("description"),
FieldPanel("show_in_home"),
]
|
{
"content_hash": "bda71b978ba66bc16e9bf5c0d4f45cbf",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 80,
"avg_line_length": 31.58,
"alnum_prop": 0.6700443318556049,
"repo_name": "evonove/evonove.it",
"id": "dd52b4f24b1007ef40656357f4f928c2c278f198",
"size": "1579",
"binary": false,
"copies": "1",
"ref": "refs/heads/devel",
"path": "django-website/portfolio/models.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Dockerfile",
"bytes": "1655"
},
{
"name": "HTML",
"bytes": "49434"
},
{
"name": "JavaScript",
"bytes": "43596"
},
{
"name": "Makefile",
"bytes": "1243"
},
{
"name": "Python",
"bytes": "92905"
},
{
"name": "SCSS",
"bytes": "48162"
},
{
"name": "Shell",
"bytes": "191"
}
],
"symlink_target": ""
}
|
"""
Created on Feb 20, 2014
@author: Aaron Ponti
"""
from ch.systemsx.cisd.openbis.dss.etl.dto.api import SimpleImageContainerDataConfig
class MicroscopyCompositeDatasetConfig(SimpleImageContainerDataConfig):
"""Image data configuration class for composite image files."""
def createChannel(self, channelCode):
"""Create a channel from the channelCode with the name as read from
the file via the MetadataReader and the color (RGB) as read.
@param channelCode Code of the channel as generated by extractImagesMetadata().
Returns a ch.systemsx.cisd.openbis.dss.etl.dto.api.Channel
"""
raise NotImplementedError()
def extractImagesMetadata(self, imagePath, imageIdentifiers):
"""Overrides extractImageMetadata method making sure to store
both series and channel indices in the channel code to be reused
later to extract color information and other metadata.
The channel code is in the form SERIES-(\d+)_CHANNEL-(\d+).
Only metadata for the relevant series number is returned!
@param imagePath Full path to the file to process
@param imageIdentifiers Array of ImageIdentifier's
@see constructor.
"""
raise NotImplementedError()
def _getChannelName(self, seriesIndx, channelIndx):
"""Returns the channel name (from the parsed metadata) for
a given channel in a given series."
"""
raise NotImplementedError()
def _getChannelColor(self, seriesIndx, channelIndx):
"""Returns the channel color (from the parsed metadata) for
a given channel in a given series.
Returns a ch.systemsx.cisd.openbis.dss.etl.dto.api.ChannelColorRGB()
"""
raise NotImplementedError()
def _getSeriesAndChannelNumbers(self, channelCode):
"""Extract series and channel number from channel code in
the form SERIES-(\d+)_CHANNEL-(\d+) to a tuple
(seriesIndx, channelIndx).
@param channelCode Code of the channel as generated by extractImagesMetadata().
Returns seriesIndx, channelIndx
"""
raise NotImplementedError()
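# A minimal parsing sketch for the SERIES-(\d+)_CHANNEL-(\d+) channel codes
# described above (assumption: concrete subclasses implement
# _getSeriesAndChannelNumbers roughly along these lines):
#
#   import re
#
#   _CHANNEL_CODE = re.compile(r"SERIES-(\d+)_CHANNEL-(\d+)")
#
#   def _getSeriesAndChannelNumbers(self, channelCode):
#       match = _CHANNEL_CODE.match(channelCode)
#       if match is None:
#           raise ValueError("Malformed channel code: " + channelCode)
#       return int(match.group(1)), int(match.group(2))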
|
{
"content_hash": "4691ce324bcaa0a46bc0a83fea27194d",
"timestamp": "",
"source": "github",
"line_count": 72,
"max_line_length": 87,
"avg_line_length": 30.930555555555557,
"alnum_prop": 0.6731028289178267,
"repo_name": "aarpon/obit_microscopy_core_technology",
"id": "8d0b5802c6bb4a286a03672fa49ec47a9ca4d605",
"size": "2252",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "core-plugins/microscopy/3/dss/drop-boxes/MicroscopyDropbox/MicroscopyCompositeDatasetConfig.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "10185"
},
{
"name": "HTML",
"bytes": "20862"
},
{
"name": "JavaScript",
"bytes": "200876"
},
{
"name": "Python",
"bytes": "478423"
}
],
"symlink_target": ""
}
|
from __future__ import print_function
import unittest
from mock import patch
from six import StringIO
from biggraphite.cli import bgutil
from tests import test_utils as bg_test_utils
class TestBgutil(bg_test_utils.TestCaseWithFakeAccessor):
metrics = ["metric1", "metric2"]
@patch("sys.stdout", new_callable=StringIO)
def test_run(self, mock_stdout):
self.accessor.drop_all_metrics()
for metric in self.metrics:
self.accessor.create_metric(bg_test_utils.make_metric_with_defaults(metric))
bgutil.main(["--driver=memory", "read", "**"], self.accessor)
output = mock_stdout.getvalue()
for metric in self.metrics:
self.assertIn(metric, output)
if __name__ == "__main__":
unittest.main()
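# A shell-level sketch of the invocation exercised above (assumption: the
# biggraphite package is installed and exposes the bgutil entry point):
#
#   bgutil --driver=memory read '**'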
|
{
"content_hash": "ccc7d53333489987eff90a5ad4ded193",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 88,
"avg_line_length": 27.607142857142858,
"alnum_prop": 0.6714100905562742,
"repo_name": "criteo/biggraphite",
"id": "239d538d93fb3555708add32fbabe947ebdcc8e7",
"size": "1365",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/cli/test_bgutil.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1191"
},
{
"name": "HTML",
"bytes": "4291"
},
{
"name": "Java",
"bytes": "51088"
},
{
"name": "Python",
"bytes": "757226"
},
{
"name": "Shell",
"bytes": "13618"
}
],
"symlink_target": ""
}
|
from discord.ext import commands
from discord.ext.commands import Cog
import re
def setup(bot):
bot.add_cog(SwitchSerialNumberCheck(bot))
class SwitchSerialNumberCheck(Cog):
def __init__(self, bot):
self.bot = bot
@commands.command(aliases=["ssnc"])
async def check_nx_serial(self, ctx, serial):
"""Check the given Switch serial to see if it is patched or not. For safety reasons, the invoking message is
removed."""
try:
await ctx.message.delete()
        except Exception:
            # Deletion can fail (e.g. missing permissions); carry on anyway.
            pass
serial = serial.split()[0].upper()
mariko = False
if not re.match("XA[JKW][1479][0-9]{6}", serial):
# This should catch serials from the new "mariko" units
# XKW10000000000, XKJ10000000000 = HAC-001-01, the "New Switch"
# XJW01000000000, XWW01000000000 = HDH-001, the Switch Lite
            # As not much is known about these assembly lines yet, every digit counts in the filter
if re.match("X[KJW][JWC][0-9]{7}", serial):
# Region "C" is Tencent-Nintendo Switch. Mariko.
mariko = True
else:
return await ctx.send("This is not a valid serial number!\n"
"If you believe this to be an error, contact staff.")
patched = False
maybe = False
region = serial[2]
assembly_line = int(serial[3])
checking_value = int(serial[3:10])
safe_serial = serial[:9] + 'XXXX'
if region == 'J':
if assembly_line == 1:
if checking_value < 1002000:
pass
elif 1002000 <= checking_value < 1003000:
maybe = True
elif checking_value >= 1003000:
patched = True
elif assembly_line == 4:
if checking_value < 4004600:
pass
elif 4004600 <= checking_value < 4006000:
maybe = True
elif checking_value >= 4006000:
patched = True
elif assembly_line == 7:
if checking_value < 7004000:
pass
elif 7004000 <= checking_value < 7005000:
maybe = True
elif checking_value >= 7005000:
patched = True
elif region == 'W':
if assembly_line == 1:
if checking_value < 1007400:
pass
                # GBATemp thread is oddly disjointed here; the proper lower
                # bound could be 1007500, not sure.
                elif 1007400 <= checking_value < 1012000:
                    maybe = True
elif checking_value >= 1012000:
patched = True
elif assembly_line == 4:
if checking_value < 4001100:
pass
elif 4001100 <= checking_value < 4001200:
maybe = True
elif checking_value >= 4001200:
patched = True
elif assembly_line == 7:
if checking_value < 7001780:
pass
                elif checking_value >= 7001780:
                    # Both the 7001780-7003000 band and everything above it
                    # resolve to "maybe", so the two ranges are collapsed here.
                    maybe = True
elif assembly_line == 9:
maybe = True
elif region == 'K':
maybe = True
if mariko:
return await ctx.send("{}: Serial {} seems to be a \"mariko\" Switch or Switch Lite.\n"
"These are currently not hackable via software, only hardware modifications that involve soldering modchips.".format(ctx.author.mention, safe_serial))
elif maybe:
return await ctx.send("{}: Serial {} _might_ be patched. The only way you can know this for sure is by "
"pushing the payload manually. You can find instructions to do so here: "
"https://switchgui.de/switch-guide/user_guide/emummc/sending_payload/".format(ctx.author.mention,
safe_serial))
elif patched:
return await ctx.send("{}: Serial {} is patched.".format(ctx.author.mention, safe_serial))
else:
return await ctx.send("{}: Serial {} is not patched.".format(ctx.author.mention, safe_serial))
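# A worked decomposition sketch for a hypothetical serial "XAW1001234":
#   region         = serial[2]         -> 'W'
#   assembly_line  = int(serial[3])    -> 1
#   checking_value = int(serial[3:10]) -> 1001234
# 1001234 < 1007400, the unpatched band for XAW1 units, so this serial would
# be reported as not patched.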
|
{
"content_hash": "e61cc753e7dc258e638611f650201fe6",
"timestamp": "",
"source": "github",
"line_count": 115,
"max_line_length": 184,
"avg_line_length": 39.43478260869565,
"alnum_prop": 0.5045203969128996,
"repo_name": "thedax/Kurisu",
"id": "3b17bfc0bfd1004753f1522ed4fc5e9ceed8ac1a",
"size": "5319",
"binary": false,
"copies": "2",
"ref": "refs/heads/port",
"path": "cogs/ssnc.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "351156"
}
],
"symlink_target": ""
}
|
import logging
import numpy
import theano
from theano import tensor
from blocks.roles import VariableRole, add_role
from blocks.bricks import (
Initializable, Linear, Sequence, Tanh)
from blocks.bricks.base import lazy, application
from blocks.bricks.parallel import Fork
from blocks.bricks.recurrent import Bidirectional
from blocks.bricks.sequence_generators import (
AbstractFeedback, LookupFeedback, AbstractEmitter)
from blocks.utils import dict_union, check_theano_variable
from lvsr.ops import RewardOp
logger = logging.getLogger(__name__)
class RecurrentWithFork(Initializable):
@lazy(allocation=['input_dim'])
def __init__(self, recurrent, input_dim, **kwargs):
super(RecurrentWithFork, self).__init__(**kwargs)
self.recurrent = recurrent
self.input_dim = input_dim
self.fork = Fork(
[name for name in self.recurrent.sequences
if name != 'mask'],
prototype=Linear())
self.children = [recurrent.brick, self.fork]
def _push_allocation_config(self):
self.fork.input_dim = self.input_dim
self.fork.output_dims = [self.recurrent.brick.get_dim(name)
for name in self.fork.output_names]
@application(inputs=['input_', 'mask'])
def apply(self, input_, mask=None, **kwargs):
return self.recurrent(
mask=mask, **dict_union(self.fork.apply(input_, as_dict=True),
kwargs))
@apply.property('outputs')
def apply_outputs(self):
return self.recurrent.states
class InitializableSequence(Sequence, Initializable):
pass
class Encoder(Initializable):
def __init__(self, enc_transition, dims, dim_input, subsample, bidir, **kwargs):
super(Encoder, self).__init__(**kwargs)
self.subsample = subsample
dims_under = [dim_input] + list((2 if bidir else 1) * numpy.array(dims))
for layer_num, (dim_under, dim) in enumerate(zip(dims_under, dims)):
layer = RecurrentWithFork(
enc_transition(dim=dim, activation=Tanh()).apply,
dim_under,
name='with_fork{}'.format(layer_num))
if bidir:
layer = Bidirectional(layer, name='bidir{}'.format(layer_num))
self.children.append(layer)
self.dim_encoded = (2 if bidir else 1) * dims[-1]
@application(outputs=['encoded', 'encoded_mask'])
def apply(self, input_, mask=None):
for layer, take_each in zip(self.children, self.subsample):
input_ = layer.apply(input_, mask)
input_ = input_[::take_each]
if mask:
mask = mask[::take_each]
return input_, (mask if mask else tensor.ones_like(input_[:, :, 0]))
def get_dim(self, name):
if name == self.apply.outputs[0]:
return self.dim_encoded
return super(Encoder, self).get_dim(name)
class OneOfNFeedback(AbstractFeedback, Initializable):
"""A feedback brick for the case when readout are integers.
Stores and retrieves distributed representations of integers.
"""
def __init__(self, num_outputs=None, feedback_dim=None, **kwargs):
super(OneOfNFeedback, self).__init__(**kwargs)
self.num_outputs = num_outputs
self.feedback_dim = num_outputs
@application
def feedback(self, outputs):
assert self.output_dim == 0
eye = tensor.eye(self.num_outputs)
check_theano_variable(outputs, None, "int")
output_shape = [outputs.shape[i]
for i in range(outputs.ndim)] + [self.feedback_dim]
return eye[outputs.flatten()].reshape(output_shape)
def get_dim(self, name):
if name == 'feedback':
return self.feedback_dim
        return super(OneOfNFeedback, self).get_dim(name)
class OtherLoss(VariableRole):
pass
OTHER_LOSS = OtherLoss()
class RewardRegressionEmitter(AbstractEmitter):
GAIN_MATRIX = 'gain_matrix'
REWARD_MATRIX = 'reward_matrix'
GAIN_MSE_LOSS = 'gain_mse_loss'
REWARD_MSE_LOSS = 'reward_mse_loss'
GROUNDTRUTH = 'groundtruth'
def __init__(self, criterion, eos_label,
alphabet_size, min_reward, **kwargs):
self.criterion = criterion
self.reward_op = RewardOp(eos_label, alphabet_size)
self.min_reward = min_reward
super(RewardRegressionEmitter, self).__init__(**kwargs)
@application
def cost(self, application_call, readouts, outputs):
if readouts.ndim == 3:
temp_shape = (readouts.shape[0] * readouts.shape[1], readouts.shape[2])
correct_mask = tensor.zeros(temp_shape)
correct_mask = tensor.set_subtensor(
correct_mask[tensor.arange(temp_shape[0]), outputs.flatten()], 1)
correct_mask = correct_mask.reshape(readouts.shape)
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# WARNING:
# this code only makes sense when the actual groundtruths
# are plugged for groundtruths.
#
# This happens in SpeechRecognizer.get_cost_graph()
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
groundtruth = outputs.copy()
groundtruth.name = self.GROUNDTRUTH
reward_matrix, gain_matrix = self.reward_op(groundtruth, outputs)
gain_matrix = theano.tensor.maximum(gain_matrix, self.min_reward)
gain_matrix.name = self.GAIN_MATRIX
reward_matrix.name = self.REWARD_MATRIX
predicted_gains = readouts.reshape(temp_shape)[
tensor.arange(temp_shape[0]), outputs.flatten()]
predicted_gains = predicted_gains.reshape(outputs.shape)
predicted_gains = tensor.concatenate([
tensor.zeros((1, outputs.shape[1])), predicted_gains[1:]])
predicted_rewards = predicted_gains.cumsum(axis=0)
predicted_rewards = readouts + predicted_rewards[:, :, None]
gain_mse_loss_matrix = ((readouts - gain_matrix) ** 2).sum(axis=-1)
reward_mse_loss_matrix = ((predicted_rewards - reward_matrix) ** 2).sum(axis=-1)
gain_mse_loss = gain_mse_loss_matrix.sum()
gain_mse_loss.name = self.GAIN_MSE_LOSS
reward_mse_loss = reward_mse_loss_matrix.sum()
reward_mse_loss.name = self.REWARD_MSE_LOSS
application_call.add_auxiliary_variable(gain_mse_loss)
if self.criterion == 'mse_gain':
add_role(reward_mse_loss, OTHER_LOSS)
application_call.add_auxiliary_variable(reward_mse_loss)
return gain_mse_loss_matrix
else:
add_role(gain_mse_loss, OTHER_LOSS)
application_call.add_auxiliary_variable(gain_mse_loss)
return reward_mse_loss_matrix
return readouts[tensor.arange(readouts.shape[0]), outputs]
@application
def emit(self, readouts):
# As a generator, acts greedily
return readouts.argmax(axis=1)
@application
def costs(self, readouts):
return -readouts
@application
def initial_outputs(self, batch_size):
# As long as we do not use the previous character, can be anything
return tensor.zeros((batch_size,), dtype='int64')
def get_dim(self, name):
if name == 'outputs':
return 0
return super(RewardRegressionEmitter, self).get_dim(name)
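# A minimal numpy sketch of the one-hot lookup used by OneOfNFeedback.feedback
# (assumptions: integer outputs and feedback_dim == num_outputs, as above):
#
#   import numpy
#   outputs = numpy.array([[1, 2], [0, 1]])  # (time, batch) integer labels
#   eye = numpy.eye(3)                       # num_outputs == 3
#   feedback = eye[outputs.flatten()].reshape(outputs.shape + (3,))
#   # feedback.shape == (2, 2, 3): one one-hot row per label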
|
{
"content_hash": "65fe4c287af6ddcea47cb361e057c8bc",
"timestamp": "",
"source": "github",
"line_count": 202,
"max_line_length": 92,
"avg_line_length": 37.381188118811885,
"alnum_prop": 0.6050854191497815,
"repo_name": "rizar/attention-lvcsr",
"id": "5a595e89d967ed45def8f96b86eb469a5cf0fb5f",
"size": "7551",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lvsr/bricks/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1288"
},
{
"name": "C",
"bytes": "156742"
},
{
"name": "C++",
"bytes": "209135"
},
{
"name": "CSS",
"bytes": "3500"
},
{
"name": "Cuda",
"bytes": "231732"
},
{
"name": "Gnuplot",
"bytes": "484"
},
{
"name": "HTML",
"bytes": "33356"
},
{
"name": "Jupyter Notebook",
"bytes": "191071"
},
{
"name": "Makefile",
"bytes": "973"
},
{
"name": "Python",
"bytes": "9313243"
},
{
"name": "Shell",
"bytes": "34454"
},
{
"name": "TeX",
"bytes": "102624"
}
],
"symlink_target": ""
}
|
from controller import controller
def main():
controller.run()
if __name__ == '__main__':
main()
|
{
"content_hash": "376e2e1be3c98d61ec48419f00275d6d",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 33,
"avg_line_length": 15,
"alnum_prop": 0.6095238095238096,
"repo_name": "ohbarye/gracenote-api-sample",
"id": "69cc7f5cec412f21d4054a3b8b850a6eefb3781e",
"size": "105",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "startup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "30969"
},
{
"name": "Smarty",
"bytes": "1778"
}
],
"symlink_target": ""
}
|
"""
Parser module specific to BCL::Contact predictions
"""
__author__ = "Felix Simkovic"
__date__ = "12 Dec 2016"
__version__ = "0.1"
import re
from conkit.io._parser import ContactFileParser
from conkit.core.contact import Contact
from conkit.core.contactmap import ContactMap
from conkit.core.contactfile import ContactFile
RE_SPLIT = re.compile(r'\s+')
class BCLContactParser(ContactFileParser):
"""Class to parse a BCL::Contact contact file
"""
def __init__(self):
super(BCLContactParser, self).__init__()
def read(self, f_handle, f_id="bclcontact"):
"""Read a contact file
Parameters
----------
f_handle
Open file handle [read permissions]
f_id : str, optional
Unique contact file identifier
Returns
-------
:obj:`~conkit.core.contactfile.ContactFile`
"""
hierarchy = ContactFile(f_id)
contact_map = ContactMap("map_1")
hierarchy.add(contact_map)
for line in f_handle:
line = line.rstrip()
if line:
res1_seq, res1, res2_seq, res2, _, _, _, _, _, raw_score = RE_SPLIT.split(line)
contact = Contact(int(res1_seq), int(res2_seq), float(raw_score))
contact.res1 = res1
contact.res2 = res2
contact_map.add(contact)
hierarchy.method = 'Contact map predicted using BCL::Contact'
return hierarchy
def write(self, f_handle, hierarchy):
"""Write a contact file instance to to file
Parameters
----------
f_handle
Open file handle [write permissions]
hierarchy : :obj:`~conkit.core.contactfile.ContactFile`, :obj:`~conkit.core.contactmap.ContactMap`
or :obj:`~conkit.core.contact.Contact`
Raises
------
:exc:`NotImplementedError`
Write function not available
"""
raise NotImplementedError("Write function not available")
|
{
"content_hash": "e786c71a5c90f01cb211f25260829d02",
"timestamp": "",
"source": "github",
"line_count": 74,
"max_line_length": 106,
"avg_line_length": 27.405405405405407,
"alnum_prop": 0.5833333333333334,
"repo_name": "fsimkovic/conkit",
"id": "142387bb06d8cbe8b09a92f14d2b97d742b84431",
"size": "3606",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "conkit/io/bclcontact.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "733433"
}
],
"symlink_target": ""
}
|
"""Tests for tensorflow.ops.data_flow_ops.FIFOQueue."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import random
import time
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.client import session as session_lib
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes as dtypes_lib
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.platform import test
from tensorflow.python.util import compat
class FIFOQueueTest(test.TestCase):
def testConstructor(self):
with ops.Graph().as_default():
q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32, name="Q")
self.assertTrue(isinstance(q.queue_ref, ops.Tensor))
self.assertProtoEquals("""
name:'Q' op:'FIFOQueueV2'
attr { key: 'component_types' value { list { type: DT_FLOAT } } }
attr { key: 'shapes' value { list {} } }
attr { key: 'capacity' value { i: 10 } }
attr { key: 'container' value { s: '' } }
attr { key: 'shared_name' value { s: '' } }
""", q.queue_ref.op.node_def)
def testMultiQueueConstructor(self):
with ops.Graph().as_default():
q = data_flow_ops.FIFOQueue(
5, (dtypes_lib.int32, dtypes_lib.float32),
shared_name="foo",
name="Q")
self.assertTrue(isinstance(q.queue_ref, ops.Tensor))
self.assertProtoEquals("""
name:'Q' op:'FIFOQueueV2'
attr { key: 'component_types' value { list {
type: DT_INT32 type : DT_FLOAT
} } }
attr { key: 'shapes' value { list {} } }
attr { key: 'capacity' value { i: 5 } }
attr { key: 'container' value { s: '' } }
attr { key: 'shared_name' value { s: 'foo' } }
""", q.queue_ref.op.node_def)
def testConstructorWithShapes(self):
with ops.Graph().as_default():
q = data_flow_ops.FIFOQueue(
5, (dtypes_lib.int32, dtypes_lib.float32),
shapes=(tensor_shape.TensorShape([1, 1, 2, 3]),
tensor_shape.TensorShape([5, 8])),
name="Q")
self.assertTrue(isinstance(q.queue_ref, ops.Tensor))
self.assertProtoEquals("""
name:'Q' op:'FIFOQueueV2'
attr { key: 'component_types' value { list {
type: DT_INT32 type : DT_FLOAT
} } }
attr { key: 'shapes' value { list {
shape { dim { size: 1 }
dim { size: 1 }
dim { size: 2 }
dim { size: 3 } }
shape { dim { size: 5 }
dim { size: 8 } }
} } }
attr { key: 'capacity' value { i: 5 } }
attr { key: 'container' value { s: '' } }
attr { key: 'shared_name' value { s: '' } }
""", q.queue_ref.op.node_def)
def testEnqueue(self):
with self.cached_session():
q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32)
enqueue_op = q.enqueue((10.0,))
enqueue_op.run()
def testEnqueueHalf(self):
with self.cached_session():
q = data_flow_ops.FIFOQueue(10, dtypes_lib.float16)
enqueue_op = q.enqueue((10.0,))
enqueue_op.run()
def testEnqueueWithShape(self):
with self.cached_session():
q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32, shapes=(3, 2))
enqueue_correct_op = q.enqueue(([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]],))
enqueue_correct_op.run()
with self.assertRaises(ValueError):
q.enqueue(([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]],))
self.assertEqual(1, q.size().eval())
def testEnqueueManyWithShape(self):
with self.cached_session():
q = data_flow_ops.FIFOQueue(
10, [dtypes_lib.int32, dtypes_lib.int32], shapes=[(), (2,)])
q.enqueue_many([[1, 2, 3, 4], [[1, 1], [2, 2], [3, 3], [4, 4]]]).run()
self.assertEqual(4, q.size().eval())
@test_util.run_in_graph_and_eager_modes
def testMultipleDequeues(self):
q = data_flow_ops.FIFOQueue(10, [dtypes_lib.int32], shapes=[()])
self.evaluate(q.enqueue_many([[1, 2, 3]]))
a, b, c = self.evaluate([q.dequeue(), q.dequeue(), q.dequeue()])
self.assertAllEqual(set([1, 2, 3]), set([a, b, c]))
@test_util.run_in_graph_and_eager_modes
def testQueuesDontShare(self):
q = data_flow_ops.FIFOQueue(10, [dtypes_lib.int32], shapes=[()])
self.evaluate(q.enqueue(1))
q2 = data_flow_ops.FIFOQueue(10, [dtypes_lib.int32], shapes=[()])
self.evaluate(q2.enqueue(2))
self.assertAllEqual(self.evaluate(q2.dequeue()), 2)
self.assertAllEqual(self.evaluate(q.dequeue()), 1)
def testEnqueueDictWithoutNames(self):
with self.cached_session():
q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32)
with self.assertRaisesRegexp(ValueError, "must have names"):
q.enqueue({"a": 12.0})
with self.assertRaisesRegexp(ValueError, "must have names"):
q.enqueue_many({"a": [12.0, 13.0]})
def testParallelEnqueue(self):
with self.cached_session() as sess:
q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32)
elems = [10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
enqueue_ops = [q.enqueue((x,)) for x in elems]
dequeued_t = q.dequeue()
# Run one producer thread for each element in elems.
def enqueue(enqueue_op):
sess.run(enqueue_op)
threads = [
self.checkedThread(
target=enqueue, args=(e,)) for e in enqueue_ops
]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
# Dequeue every element using a single thread.
results = []
for _ in xrange(len(elems)):
results.append(dequeued_t.eval())
self.assertItemsEqual(elems, results)
def testParallelDequeue(self):
with self.cached_session() as sess:
q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32)
elems = [10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
enqueue_ops = [q.enqueue((x,)) for x in elems]
dequeued_t = q.dequeue()
# Enqueue every element using a single thread.
for enqueue_op in enqueue_ops:
enqueue_op.run()
# Run one consumer thread for each element in elems.
results = []
def dequeue():
results.append(sess.run(dequeued_t))
threads = [self.checkedThread(target=dequeue) for _ in enqueue_ops]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
self.assertItemsEqual(elems, results)
def testDequeue(self):
with self.cached_session():
q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32)
elems = [10.0, 20.0, 30.0]
enqueue_ops = [q.enqueue((x,)) for x in elems]
dequeued_t = q.dequeue()
for enqueue_op in enqueue_ops:
enqueue_op.run()
for i in xrange(len(elems)):
vals = dequeued_t.eval()
self.assertEqual([elems[i]], vals)
def testDequeueHalf(self):
with self.cached_session():
q = data_flow_ops.FIFOQueue(10, dtypes_lib.float16)
elems = [10.0, 20.0, 30.0]
enqueue_ops = [q.enqueue((x,)) for x in elems]
dequeued_t = q.dequeue()
for enqueue_op in enqueue_ops:
enqueue_op.run()
for i in xrange(len(elems)):
vals = dequeued_t.eval()
self.assertEqual([elems[i]], vals)
def testEnqueueAndBlockingDequeue(self):
with self.cached_session() as sess:
q = data_flow_ops.FIFOQueue(3, dtypes_lib.float32)
elems = [10.0, 20.0, 30.0]
enqueue_ops = [q.enqueue((x,)) for x in elems]
dequeued_t = q.dequeue()
def enqueue():
# The enqueue_ops should run after the dequeue op has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
for enqueue_op in enqueue_ops:
sess.run(enqueue_op)
results = []
def dequeue():
for _ in xrange(len(elems)):
results.append(sess.run(dequeued_t))
enqueue_thread = self.checkedThread(target=enqueue)
dequeue_thread = self.checkedThread(target=dequeue)
enqueue_thread.start()
dequeue_thread.start()
enqueue_thread.join()
dequeue_thread.join()
for elem, result in zip(elems, results):
self.assertEqual([elem], result)
def testMultiEnqueueAndDequeue(self):
with self.cached_session() as sess:
q = data_flow_ops.FIFOQueue(10, (dtypes_lib.int32, dtypes_lib.float32))
elems = [(5, 10.0), (10, 20.0), (15, 30.0)]
enqueue_ops = [q.enqueue((x, y)) for x, y in elems]
dequeued_t = q.dequeue()
for enqueue_op in enqueue_ops:
enqueue_op.run()
for i in xrange(len(elems)):
x_val, y_val = sess.run(dequeued_t)
x, y = elems[i]
self.assertEqual([x], x_val)
self.assertEqual([y], y_val)
def testQueueSizeEmpty(self):
with self.cached_session():
q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32)
self.assertEqual([0], q.size().eval())
def testQueueSizeAfterEnqueueAndDequeue(self):
with self.cached_session():
q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32)
enqueue_op = q.enqueue((10.0,))
dequeued_t = q.dequeue()
size = q.size()
self.assertEqual([], size.get_shape())
enqueue_op.run()
self.assertEqual(1, size.eval())
dequeued_t.op.run()
self.assertEqual(0, size.eval())
def testEnqueueMany(self):
with self.cached_session():
q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32)
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
dequeued_t = q.dequeue()
enqueue_op.run()
enqueue_op.run()
for i in range(8):
vals = dequeued_t.eval()
self.assertEqual([elems[i % 4]], vals)
def testEmptyEnqueueMany(self):
with self.cached_session():
q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32)
empty_t = constant_op.constant(
[], dtype=dtypes_lib.float32, shape=[0, 2, 3])
enqueue_op = q.enqueue_many((empty_t,))
size_t = q.size()
self.assertEqual([0], size_t.eval())
enqueue_op.run()
self.assertEqual([0], size_t.eval())
def testEmptyDequeueMany(self):
with self.cached_session():
q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32, shapes=())
enqueue_op = q.enqueue((10.0,))
dequeued_t = q.dequeue_many(0)
self.assertEqual([], dequeued_t.eval().tolist())
enqueue_op.run()
self.assertEqual([], dequeued_t.eval().tolist())
def testEmptyDequeueUpTo(self):
with self.cached_session():
q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32, shapes=())
enqueue_op = q.enqueue((10.0,))
dequeued_t = q.dequeue_up_to(0)
self.assertEqual([], dequeued_t.eval().tolist())
enqueue_op.run()
self.assertEqual([], dequeued_t.eval().tolist())
def testEmptyDequeueManyWithNoShape(self):
with self.cached_session():
q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32)
# Expect the operation to fail due to the shape not being constrained.
with self.assertRaisesOpError("specified shapes"):
q.dequeue_many(0).eval()
def testMultiEnqueueMany(self):
with self.cached_session() as sess:
q = data_flow_ops.FIFOQueue(10, (dtypes_lib.float32, dtypes_lib.int32))
float_elems = [10.0, 20.0, 30.0, 40.0]
int_elems = [[1, 2], [3, 4], [5, 6], [7, 8]]
enqueue_op = q.enqueue_many((float_elems, int_elems))
dequeued_t = q.dequeue()
enqueue_op.run()
enqueue_op.run()
for i in range(8):
float_val, int_val = sess.run(dequeued_t)
self.assertEqual(float_elems[i % 4], float_val)
self.assertAllEqual(int_elems[i % 4], int_val)
def testDequeueMany(self):
with self.cached_session():
q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32, ())
elems = [10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
enqueue_op = q.enqueue_many((elems,))
dequeued_t = q.dequeue_many(4)
enqueue_op.run()
self.assertAllEqual(elems[0:4], dequeued_t.eval())
self.assertAllEqual(elems[4:8], dequeued_t.eval())
def testDequeueUpToNoBlocking(self):
with self.cached_session():
q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32, ())
elems = [10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
enqueue_op = q.enqueue_many((elems,))
dequeued_t = q.dequeue_up_to(4)
enqueue_op.run()
self.assertAllEqual(elems[0:4], dequeued_t.eval())
self.assertAllEqual(elems[4:8], dequeued_t.eval())
def testMultiDequeueMany(self):
with self.cached_session() as sess:
q = data_flow_ops.FIFOQueue(
10, (dtypes_lib.float32, dtypes_lib.int32), shapes=((), (2,)))
float_elems = [
10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0
]
int_elems = [[1, 2], [3, 4], [5, 6], [7, 8], [9, 10], [11, 12], [13, 14],
[15, 16], [17, 18], [19, 20]]
enqueue_op = q.enqueue_many((float_elems, int_elems))
dequeued_t = q.dequeue_many(4)
dequeued_single_t = q.dequeue()
enqueue_op.run()
float_val, int_val = sess.run(dequeued_t)
self.assertAllEqual(float_elems[0:4], float_val)
self.assertAllEqual(int_elems[0:4], int_val)
self.assertEqual(float_val.shape, dequeued_t[0].get_shape())
self.assertEqual(int_val.shape, dequeued_t[1].get_shape())
float_val, int_val = sess.run(dequeued_t)
self.assertAllEqual(float_elems[4:8], float_val)
self.assertAllEqual(int_elems[4:8], int_val)
float_val, int_val = sess.run(dequeued_single_t)
self.assertAllEqual(float_elems[8], float_val)
self.assertAllEqual(int_elems[8], int_val)
self.assertEqual(float_val.shape, dequeued_single_t[0].get_shape())
self.assertEqual(int_val.shape, dequeued_single_t[1].get_shape())
def testMultiDequeueUpToNoBlocking(self):
with self.cached_session() as sess:
q = data_flow_ops.FIFOQueue(
10, (dtypes_lib.float32, dtypes_lib.int32), shapes=((), (2,)))
float_elems = [
10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0
]
int_elems = [[1, 2], [3, 4], [5, 6], [7, 8], [9, 10], [11, 12], [13, 14],
[15, 16], [17, 18], [19, 20]]
enqueue_op = q.enqueue_many((float_elems, int_elems))
dequeued_t = q.dequeue_up_to(4)
enqueue_op.run()
float_val, int_val = sess.run(dequeued_t)
self.assertAllEqual(float_elems[0:4], float_val)
self.assertAllEqual(int_elems[0:4], int_val)
self.assertEqual([None], dequeued_t[0].get_shape().as_list())
self.assertEqual([None, 2], dequeued_t[1].get_shape().as_list())
float_val, int_val = sess.run(dequeued_t)
self.assertAllEqual(float_elems[4:8], float_val)
self.assertAllEqual(int_elems[4:8], int_val)
def testHighDimension(self):
with self.cached_session():
q = data_flow_ops.FIFOQueue(10, dtypes_lib.int32, (4, 4, 4, 4))
elems = np.array([[[[[x] * 4] * 4] * 4] * 4 for x in range(10)], np.int32)
enqueue_op = q.enqueue_many((elems,))
dequeued_t = q.dequeue_many(10)
enqueue_op.run()
self.assertAllEqual(dequeued_t.eval(), elems)
def testEnqueueWrongShape(self):
q = data_flow_ops.FIFOQueue(10, (dtypes_lib.int32, dtypes_lib.int32), ((),
(2)))
with self.assertRaises(ValueError):
q.enqueue(([1, 2], [2, 2]))
with self.assertRaises(ValueError):
q.enqueue_many((7, [[1, 2], [3, 4], [5, 6]]))
def testBatchSizeMismatch(self):
q = data_flow_ops.FIFOQueue(10, (dtypes_lib.int32, dtypes_lib.int32,
dtypes_lib.int32), ((), (), ()))
with self.assertRaises(ValueError):
q.enqueue_many(([1, 2, 3], [1, 2], [1, 2, 3]))
with self.assertRaises(ValueError):
q.enqueue_many(
([1, 2, 3], [1, 2], array_ops.placeholder(dtypes_lib.int32)))
with self.assertRaises(ValueError):
q.enqueue_many(
(array_ops.placeholder(dtypes_lib.int32), [1, 2], [1, 2, 3]))
def testEnqueueManyEmptyTypeConversion(self):
q = data_flow_ops.FIFOQueue(10, (dtypes_lib.int32, dtypes_lib.float32), (
(), ()))
enq = q.enqueue_many(([], []))
self.assertEqual(dtypes_lib.int32, enq.inputs[1].dtype)
self.assertEqual(dtypes_lib.float32, enq.inputs[2].dtype)
def testEnqueueWrongType(self):
q = data_flow_ops.FIFOQueue(10, (dtypes_lib.int32, dtypes_lib.float32), (
(), ()))
with self.assertRaises(ValueError):
q.enqueue((array_ops.placeholder(dtypes_lib.int32),
array_ops.placeholder(dtypes_lib.int32)))
with self.assertRaises(ValueError):
q.enqueue_many((array_ops.placeholder(dtypes_lib.int32),
array_ops.placeholder(dtypes_lib.int32)))
def testEnqueueWrongShapeAtRuntime(self):
with self.cached_session() as sess:
q = data_flow_ops.FIFOQueue(10, (dtypes_lib.int32, dtypes_lib.int32), (
(2, 2), (3, 3)))
elems_ok = np.array([1] * 4).reshape((2, 2)).astype(np.int32)
elems_bad = array_ops.placeholder(dtypes_lib.int32)
enqueue_op = q.enqueue((elems_ok, elems_bad))
with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
r"Expected \[3,3\], got \[3,4\]"):
sess.run([enqueue_op],
feed_dict={elems_bad: np.array([1] * 12).reshape((3, 4))})
def testEnqueueDequeueManyWrongShape(self):
with self.cached_session() as sess:
q = data_flow_ops.FIFOQueue(10, (dtypes_lib.int32, dtypes_lib.int32), (
(2, 2), (3, 3)))
elems_ok = np.array([1] * 8).reshape((2, 2, 2)).astype(np.int32)
elems_bad = array_ops.placeholder(dtypes_lib.int32)
enqueue_op = q.enqueue_many((elems_ok, elems_bad))
dequeued_t = q.dequeue_many(2)
with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
"Shape mismatch in tuple component 1. "
r"Expected \[2,3,3\], got \[2,3,4\]"):
sess.run([enqueue_op],
feed_dict={elems_bad: np.array([1] * 24).reshape((2, 3, 4))})
dequeued_t.eval()
def testParallelEnqueueMany(self):
with self.cached_session() as sess:
q = data_flow_ops.FIFOQueue(1000, dtypes_lib.float32, shapes=())
elems = [10.0 * x for x in range(100)]
enqueue_op = q.enqueue_many((elems,))
dequeued_t = q.dequeue_many(1000)
# Enqueue 100 items in parallel on 10 threads.
def enqueue():
sess.run(enqueue_op)
threads = [self.checkedThread(target=enqueue) for _ in range(10)]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
self.assertItemsEqual(dequeued_t.eval(), elems * 10)
def testParallelDequeueMany(self):
with self.cached_session() as sess:
q = data_flow_ops.FIFOQueue(1000, dtypes_lib.float32, shapes=())
elems = [10.0 * x for x in range(1000)]
enqueue_op = q.enqueue_many((elems,))
dequeued_t = q.dequeue_many(100)
enqueue_op.run()
# Dequeue 100 items in parallel on 10 threads.
dequeued_elems = []
def dequeue():
dequeued_elems.extend(sess.run(dequeued_t))
threads = [self.checkedThread(target=dequeue) for _ in range(10)]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
self.assertItemsEqual(elems, dequeued_elems)
def testParallelDequeueUpTo(self):
with self.cached_session() as sess:
q = data_flow_ops.FIFOQueue(1000, dtypes_lib.float32, shapes=())
elems = [10.0 * x for x in range(1000)]
enqueue_op = q.enqueue_many((elems,))
close_op = q.close()
dequeued_t = q.dequeue_up_to(101)
enqueue_op.run()
close_op.run()
# Dequeue up to 101 items in parallel on 10 threads, from closed queue.
dequeued_elems = []
def dequeue():
dequeued_elems.extend(sess.run(dequeued_t))
threads = [self.checkedThread(target=dequeue) for _ in range(10)]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
self.assertItemsEqual(elems, dequeued_elems)
def testParallelEnqueueAndDequeue(self):
with self.cached_session() as sess:
q = data_flow_ops.FIFOQueue(50, dtypes_lib.float32, shapes=())
initial_elements = [10.0] * 49
q.enqueue_many((initial_elements,)).run()
enqueue_op = q.enqueue((20.0,))
dequeued_t = q.dequeue()
def enqueue():
for _ in xrange(100):
sess.run(enqueue_op)
def dequeue():
for _ in xrange(100):
self.assertTrue(sess.run(dequeued_t) in (10.0, 20.0))
enqueue_threads = [self.checkedThread(target=enqueue) for _ in range(10)]
dequeue_threads = [self.checkedThread(target=dequeue) for _ in range(10)]
for enqueue_thread in enqueue_threads:
enqueue_thread.start()
for dequeue_thread in dequeue_threads:
dequeue_thread.start()
for enqueue_thread in enqueue_threads:
enqueue_thread.join()
for dequeue_thread in dequeue_threads:
dequeue_thread.join()
# Dequeue the initial count of elements to clean up.
cleanup_elems = q.dequeue_many(49).eval()
for elem in cleanup_elems:
self.assertTrue(elem in (10.0, 20.0))
def testMixtureOfEnqueueAndEnqueueMany(self):
with self.cached_session() as sess:
q = data_flow_ops.FIFOQueue(10, dtypes_lib.int32, shapes=())
enqueue_placeholder = array_ops.placeholder(dtypes_lib.int32, shape=())
enqueue_op = q.enqueue((enqueue_placeholder,))
enqueuemany_placeholder = array_ops.placeholder(
dtypes_lib.int32, shape=(None,))
enqueuemany_op = q.enqueue_many((enqueuemany_placeholder,))
dequeued_t = q.dequeue()
close_op = q.close()
def dequeue():
for i in xrange(250):
self.assertEqual(i, sess.run(dequeued_t))
dequeue_thread = self.checkedThread(target=dequeue)
dequeue_thread.start()
elements_enqueued = 0
while elements_enqueued < 250:
# With equal probability, run Enqueue or enqueue_many.
if random.random() > 0.5:
enqueue_op.run({enqueue_placeholder: elements_enqueued})
elements_enqueued += 1
else:
count = random.randint(0, min(20, 250 - elements_enqueued))
range_to_enqueue = np.arange(
elements_enqueued, elements_enqueued + count, dtype=np.int32)
enqueuemany_op.run({enqueuemany_placeholder: range_to_enqueue})
elements_enqueued += count
close_op.run()
dequeue_thread.join()
self.assertEqual(0, q.size().eval())
def testMixtureOfDequeueAndDequeueMany(self):
with self.cached_session() as sess:
q = data_flow_ops.FIFOQueue(10, dtypes_lib.int32, shapes=())
enqueue_op = q.enqueue_many((np.arange(250, dtype=np.int32),))
dequeued_t = q.dequeue()
count_placeholder = array_ops.placeholder(dtypes_lib.int32, shape=())
dequeuemany_t = q.dequeue_many(count_placeholder)
def enqueue():
sess.run(enqueue_op)
enqueue_thread = self.checkedThread(target=enqueue)
enqueue_thread.start()
elements_dequeued = 0
while elements_dequeued < 250:
# With equal probability, run Dequeue or dequeue_many.
if random.random() > 0.5:
self.assertEqual(elements_dequeued, dequeued_t.eval())
elements_dequeued += 1
else:
count = random.randint(0, min(20, 250 - elements_dequeued))
expected_range = np.arange(
elements_dequeued, elements_dequeued + count, dtype=np.int32)
self.assertAllEqual(expected_range,
dequeuemany_t.eval({
count_placeholder: count
}))
elements_dequeued += count
q.close().run()
enqueue_thread.join()
self.assertEqual(0, q.size().eval())
def testBlockingDequeueMany(self):
with self.cached_session() as sess:
q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32, ())
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
dequeued_t = q.dequeue_many(4)
dequeued_elems = []
def enqueue():
# The enqueue_op should run after the dequeue op has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
sess.run(enqueue_op)
def dequeue():
dequeued_elems.extend(sess.run(dequeued_t).tolist())
enqueue_thread = self.checkedThread(target=enqueue)
dequeue_thread = self.checkedThread(target=dequeue)
enqueue_thread.start()
dequeue_thread.start()
enqueue_thread.join()
dequeue_thread.join()
self.assertAllEqual(elems, dequeued_elems)
def testBlockingDequeueUpTo(self):
with self.cached_session() as sess:
q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32, ())
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
dequeued_t = q.dequeue_up_to(4)
dequeued_elems = []
def enqueue():
# The enqueue_op should run after the dequeue op has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
sess.run(enqueue_op)
def dequeue():
dequeued_elems.extend(sess.run(dequeued_t).tolist())
enqueue_thread = self.checkedThread(target=enqueue)
dequeue_thread = self.checkedThread(target=dequeue)
enqueue_thread.start()
dequeue_thread.start()
enqueue_thread.join()
dequeue_thread.join()
self.assertAllEqual(elems, dequeued_elems)
def testDequeueManyWithTensorParameter(self):
with self.cached_session():
# Define a first queue that contains integer counts.
dequeue_counts = [random.randint(1, 10) for _ in range(100)]
count_q = data_flow_ops.FIFOQueue(100, dtypes_lib.int32, ())
enqueue_counts_op = count_q.enqueue_many((dequeue_counts,))
total_count = sum(dequeue_counts)
# Define a second queue that contains total_count elements.
elems = [random.randint(0, 100) for _ in range(total_count)]
q = data_flow_ops.FIFOQueue(total_count, dtypes_lib.int32, ())
enqueue_elems_op = q.enqueue_many((elems,))
# Define a subgraph that first dequeues a count, then DequeuesMany
# that number of elements.
dequeued_t = q.dequeue_many(count_q.dequeue())
enqueue_counts_op.run()
enqueue_elems_op.run()
dequeued_elems = []
for _ in dequeue_counts:
dequeued_elems.extend(dequeued_t.eval())
self.assertEqual(elems, dequeued_elems)
def testDequeueFromClosedQueue(self):
with self.cached_session():
q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32)
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
close_op = q.close()
dequeued_t = q.dequeue()
enqueue_op.run()
close_op.run()
for elem in elems:
self.assertEqual([elem], dequeued_t.eval())
# Expect the operation to fail due to the queue being closed.
with self.assertRaisesRegexp(errors_impl.OutOfRangeError,
"is closed and has insufficient"):
dequeued_t.eval()
def testBlockingDequeueFromClosedQueue(self):
with self.cached_session() as sess:
q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32)
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
close_op = q.close()
dequeued_t = q.dequeue()
enqueue_op.run()
def dequeue():
for elem in elems:
self.assertEqual([elem], sess.run(dequeued_t))
# Expect the operation to fail due to the queue being closed.
with self.assertRaisesRegexp(errors_impl.OutOfRangeError,
"is closed and has insufficient"):
sess.run(dequeued_t)
dequeue_thread = self.checkedThread(target=dequeue)
dequeue_thread.start()
# The close_op should run after the dequeue_thread has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
close_op.run()
dequeue_thread.join()
def testBlockingDequeueFromClosedEmptyQueue(self):
with self.cached_session() as sess:
q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32)
close_op = q.close()
dequeued_t = q.dequeue()
def dequeue():
# Expect the operation to fail due to the queue being closed.
with self.assertRaisesRegexp(errors_impl.OutOfRangeError,
"is closed and has insufficient"):
sess.run(dequeued_t)
dequeue_thread = self.checkedThread(target=dequeue)
dequeue_thread.start()
# The close_op should run after the dequeue_thread has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
close_op.run()
dequeue_thread.join()
def testBlockingDequeueManyFromClosedQueue(self):
with self.cached_session() as sess:
q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32, ())
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
close_op = q.close()
dequeued_t = q.dequeue_many(4)
enqueue_op.run()
def dequeue():
self.assertAllEqual(elems, sess.run(dequeued_t))
# Expect the operation to fail due to the queue being closed.
with self.assertRaisesRegexp(errors_impl.OutOfRangeError,
"is closed and has insufficient"):
sess.run(dequeued_t)
dequeue_thread = self.checkedThread(target=dequeue)
dequeue_thread.start()
# The close_op should run after the dequeue_thread has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
close_op.run()
dequeue_thread.join()
def testBlockingDequeueManyButNotAllFromClosedQueue(self):
with self.cached_session() as sess:
q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32, ())
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
close_op = q.close()
dequeued_t = q.dequeue_many(3)
enqueue_op.run()
def dequeue():
self.assertAllEqual(elems[:3], sess.run(dequeued_t))
# Expect the operation to fail due to the queue being closed.
with self.assertRaisesRegexp(errors_impl.OutOfRangeError,
"is closed and has insufficient"):
sess.run(dequeued_t)
dequeue_thread = self.checkedThread(target=dequeue)
dequeue_thread.start()
# The close_op should run after the dequeue_thread has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
close_op.run()
dequeue_thread.join()
def testDequeueUpToFromClosedQueueReturnsRemainder(self):
with self.cached_session() as sess:
q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32, ())
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
close_op = q.close()
dequeued_t = q.dequeue_up_to(3)
enqueue_op.run()
def dequeue():
self.assertAllEqual(elems[:3], sess.run(dequeued_t))
self.assertAllEqual(elems[3:], sess.run(dequeued_t))
dequeue_thread = self.checkedThread(target=dequeue)
dequeue_thread.start()
# The close_op should run after the dequeue_thread has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
close_op.run()
dequeue_thread.join()
def testEnqueueManyLargerThanCapacityWithConcurrentDequeueMany(self):
with self.cached_session() as sess:
q = data_flow_ops.FIFOQueue(4, dtypes_lib.float32, ())
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
close_op = q.close()
dequeued_t = q.dequeue_many(3)
cleanup_dequeue_t = q.dequeue()
def enqueue():
sess.run(enqueue_op)
def dequeue():
self.assertAllEqual(elems[0:3], sess.run(dequeued_t))
with self.assertRaises(errors_impl.OutOfRangeError):
sess.run(dequeued_t)
self.assertEqual(elems[3], sess.run(cleanup_dequeue_t))
def close():
sess.run(close_op)
enqueue_thread = self.checkedThread(target=enqueue)
enqueue_thread.start()
dequeue_thread = self.checkedThread(target=dequeue)
dequeue_thread.start()
# The close_op should run after the dequeue_thread has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
close_thread = self.checkedThread(target=close)
close_thread.start()
enqueue_thread.join()
dequeue_thread.join()
close_thread.join()
def testClosedBlockingDequeueManyRestoresPartialBatch(self):
with self.cached_session() as sess:
q = data_flow_ops.FIFOQueue(4, (dtypes_lib.float32, dtypes_lib.float32), (
(), ()))
elems_a = [1.0, 2.0, 3.0]
elems_b = [10.0, 20.0, 30.0]
enqueue_op = q.enqueue_many((elems_a, elems_b))
dequeued_a_t, dequeued_b_t = q.dequeue_many(4)
cleanup_dequeue_a_t, cleanup_dequeue_b_t = q.dequeue()
close_op = q.close()
enqueue_op.run()
def dequeue():
with self.assertRaises(errors_impl.OutOfRangeError):
sess.run([dequeued_a_t, dequeued_b_t])
dequeue_thread = self.checkedThread(target=dequeue)
dequeue_thread.start()
# The close_op should run after the dequeue_thread has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
close_op.run()
dequeue_thread.join()
# Test that the elements in the partially-dequeued batch are
# restored in the correct order.
for elem_a, elem_b in zip(elems_a, elems_b):
val_a, val_b = sess.run([cleanup_dequeue_a_t, cleanup_dequeue_b_t])
self.assertEqual(elem_a, val_a)
self.assertEqual(elem_b, val_b)
self.assertEqual(0, q.size().eval())
def testBlockingDequeueManyFromClosedEmptyQueue(self):
with self.cached_session() as sess:
q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32, ())
close_op = q.close()
dequeued_t = q.dequeue_many(4)
def dequeue():
# Expect the operation to fail due to the queue being closed.
with self.assertRaisesRegexp(errors_impl.OutOfRangeError,
"is closed and has insufficient"):
sess.run(dequeued_t)
dequeue_thread = self.checkedThread(target=dequeue)
dequeue_thread.start()
# The close_op should run after the dequeue_thread has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
close_op.run()
dequeue_thread.join()
def testBlockingDequeueUpToFromClosedEmptyQueue(self):
with self.cached_session() as sess:
q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32, ())
close_op = q.close()
dequeued_t = q.dequeue_up_to(4)
def dequeue():
# Expect the operation to fail due to the queue being closed.
with self.assertRaisesRegexp(errors_impl.OutOfRangeError,
"is closed and has insufficient"):
sess.run(dequeued_t)
dequeue_thread = self.checkedThread(target=dequeue)
dequeue_thread.start()
# The close_op should run after the dequeue_thread has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
close_op.run()
dequeue_thread.join()
def testEnqueueToClosedQueue(self):
with self.cached_session():
q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32)
enqueue_op = q.enqueue((10.0,))
close_op = q.close()
enqueue_op.run()
close_op.run()
# Expect the operation to fail due to the queue being closed.
with self.assertRaisesRegexp(errors_impl.CancelledError, "is closed"):
enqueue_op.run()
def testEnqueueManyToClosedQueue(self):
with self.cached_session():
q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32)
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
close_op = q.close()
enqueue_op.run()
close_op.run()
# Expect the operation to fail due to the queue being closed.
with self.assertRaisesRegexp(errors_impl.CancelledError, "is closed"):
enqueue_op.run()
def testBlockingEnqueueToFullQueue(self):
with self.cached_session() as sess:
q = data_flow_ops.FIFOQueue(4, dtypes_lib.float32)
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
blocking_enqueue_op = q.enqueue((50.0,))
dequeued_t = q.dequeue()
enqueue_op.run()
def blocking_enqueue():
sess.run(blocking_enqueue_op)
thread = self.checkedThread(target=blocking_enqueue)
thread.start()
# The dequeue ops should run after the blocking_enqueue_op has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
for elem in elems:
self.assertEqual([elem], dequeued_t.eval())
self.assertEqual([50.0], dequeued_t.eval())
thread.join()
def testBlockingEnqueueManyToFullQueue(self):
with self.cached_session() as sess:
q = data_flow_ops.FIFOQueue(4, dtypes_lib.float32)
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
blocking_enqueue_op = q.enqueue_many(([50.0, 60.0],))
dequeued_t = q.dequeue()
enqueue_op.run()
def blocking_enqueue():
sess.run(blocking_enqueue_op)
thread = self.checkedThread(target=blocking_enqueue)
thread.start()
# The dequeue ops should run after the blocking_enqueue_op has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
for elem in elems:
self.assertEqual([elem], dequeued_t.eval())
time.sleep(0.01)
self.assertEqual([50.0], dequeued_t.eval())
self.assertEqual([60.0], dequeued_t.eval())
# Make sure the thread finishes before exiting.
thread.join()
def testBlockingEnqueueBeforeClose(self):
with self.cached_session() as sess:
q = data_flow_ops.FIFOQueue(4, dtypes_lib.float32)
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
blocking_enqueue_op = q.enqueue((50.0,))
close_op = q.close()
dequeued_t = q.dequeue()
enqueue_op.run()
def blocking_enqueue():
# Expect the operation to succeed once the dequeue op runs.
sess.run(blocking_enqueue_op)
enqueue_thread = self.checkedThread(target=blocking_enqueue)
enqueue_thread.start()
# The close_op should run after the blocking_enqueue_op has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
def close():
sess.run(close_op)
close_thread = self.checkedThread(target=close)
close_thread.start()
# The dequeue will unblock both threads.
self.assertEqual(10.0, dequeued_t.eval())
enqueue_thread.join()
close_thread.join()
for elem in [20.0, 30.0, 40.0, 50.0]:
self.assertEqual(elem, dequeued_t.eval())
self.assertEqual(0, q.size().eval())
def testBlockingEnqueueManyBeforeClose(self):
with self.cached_session() as sess:
q = data_flow_ops.FIFOQueue(4, dtypes_lib.float32)
elems = [10.0, 20.0, 30.0]
enqueue_op = q.enqueue_many((elems,))
blocking_enqueue_op = q.enqueue_many(([50.0, 60.0],))
close_op = q.close()
dequeued_t = q.dequeue()
enqueue_op.run()
def blocking_enqueue():
sess.run(blocking_enqueue_op)
enqueue_thread = self.checkedThread(target=blocking_enqueue)
enqueue_thread.start()
# The close_op should run after the blocking_enqueue_op has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
def close():
sess.run(close_op)
close_thread = self.checkedThread(target=close)
close_thread.start()
# The dequeue will unblock both threads.
self.assertEqual(10.0, dequeued_t.eval())
enqueue_thread.join()
close_thread.join()
for elem in [20.0, 30.0, 50.0, 60.0]:
self.assertEqual(elem, dequeued_t.eval())
def testDoesNotLoseValue(self):
with self.cached_session():
q = data_flow_ops.FIFOQueue(1, dtypes_lib.float32)
enqueue_op = q.enqueue((10.0,))
size_t = q.size()
enqueue_op.run()
for _ in range(500):
self.assertEqual(size_t.eval(), [1])
def testSharedQueueSameSession(self):
with self.cached_session():
q1 = data_flow_ops.FIFOQueue(
1, dtypes_lib.float32, shared_name="shared_queue")
q1.enqueue((10.0,)).run()
q2 = data_flow_ops.FIFOQueue(
1, dtypes_lib.float32, shared_name="shared_queue")
q1_size_t = q1.size()
q2_size_t = q2.size()
self.assertEqual(q1_size_t.eval(), [1])
self.assertEqual(q2_size_t.eval(), [1])
self.assertEqual(q2.dequeue().eval(), [10.0])
self.assertEqual(q1_size_t.eval(), [0])
self.assertEqual(q2_size_t.eval(), [0])
q2.enqueue((20.0,)).run()
self.assertEqual(q1_size_t.eval(), [1])
self.assertEqual(q2_size_t.eval(), [1])
self.assertEqual(q1.dequeue().eval(), [20.0])
self.assertEqual(q1_size_t.eval(), [0])
self.assertEqual(q2_size_t.eval(), [0])
def testIncompatibleSharedQueueErrors(self):
with self.cached_session():
q_a_1 = data_flow_ops.FIFOQueue(10, dtypes_lib.float32, shared_name="q_a")
q_a_2 = data_flow_ops.FIFOQueue(15, dtypes_lib.float32, shared_name="q_a")
q_a_1.queue_ref.op.run()
with self.assertRaisesOpError("capacity"):
q_a_2.queue_ref.op.run()
q_b_1 = data_flow_ops.FIFOQueue(10, dtypes_lib.float32, shared_name="q_b")
q_b_2 = data_flow_ops.FIFOQueue(10, dtypes_lib.int32, shared_name="q_b")
q_b_1.queue_ref.op.run()
with self.assertRaisesOpError("component types"):
q_b_2.queue_ref.op.run()
q_c_1 = data_flow_ops.FIFOQueue(10, dtypes_lib.float32, shared_name="q_c")
q_c_2 = data_flow_ops.FIFOQueue(
10, dtypes_lib.float32, shapes=[(1, 1, 2, 3)], shared_name="q_c")
q_c_1.queue_ref.op.run()
with self.assertRaisesOpError("component shapes"):
q_c_2.queue_ref.op.run()
q_d_1 = data_flow_ops.FIFOQueue(
10, dtypes_lib.float32, shapes=[(1, 1, 2, 3)], shared_name="q_d")
q_d_2 = data_flow_ops.FIFOQueue(10, dtypes_lib.float32, shared_name="q_d")
q_d_1.queue_ref.op.run()
with self.assertRaisesOpError("component shapes"):
q_d_2.queue_ref.op.run()
q_e_1 = data_flow_ops.FIFOQueue(
10, dtypes_lib.float32, shapes=[(1, 1, 2, 3)], shared_name="q_e")
q_e_2 = data_flow_ops.FIFOQueue(
10, dtypes_lib.float32, shapes=[(1, 1, 2, 4)], shared_name="q_e")
q_e_1.queue_ref.op.run()
with self.assertRaisesOpError("component shapes"):
q_e_2.queue_ref.op.run()
q_f_1 = data_flow_ops.FIFOQueue(10, dtypes_lib.float32, shared_name="q_f")
q_f_2 = data_flow_ops.FIFOQueue(
10, (dtypes_lib.float32, dtypes_lib.int32), shared_name="q_f")
q_f_1.queue_ref.op.run()
with self.assertRaisesOpError("component types"):
q_f_2.queue_ref.op.run()
def testSelectQueue(self):
with self.cached_session():
num_queues = 10
qlist = list()
for _ in xrange(num_queues):
qlist.append(data_flow_ops.FIFOQueue(10, dtypes_lib.float32))
# Enqueue/Dequeue into a dynamically selected queue
for _ in xrange(20):
index = np.random.randint(num_queues)
q = data_flow_ops.FIFOQueue.from_list(index, qlist)
q.enqueue((10.,)).run()
self.assertEqual(q.dequeue().eval(), 10.0)
def testSelectQueueOutOfRange(self):
with self.cached_session():
q1 = data_flow_ops.FIFOQueue(10, dtypes_lib.float32)
q2 = data_flow_ops.FIFOQueue(15, dtypes_lib.float32)
enq_q = data_flow_ops.FIFOQueue.from_list(3, [q1, q2])
with self.assertRaisesOpError("is not in"):
enq_q.dequeue().eval()
def _blockingDequeue(self, sess, dequeue_op):
with self.assertRaisesOpError("was cancelled"):
sess.run(dequeue_op)
def _blockingDequeueMany(self, sess, dequeue_many_op):
with self.assertRaisesOpError("was cancelled"):
sess.run(dequeue_many_op)
def _blockingEnqueue(self, sess, enqueue_op):
with self.assertRaisesOpError("was cancelled"):
sess.run(enqueue_op)
def _blockingEnqueueMany(self, sess, enqueue_many_op):
with self.assertRaisesOpError("was cancelled"):
sess.run(enqueue_many_op)
def testResetOfBlockingOperation(self):
with self.cached_session() as sess:
q_empty = data_flow_ops.FIFOQueue(5, dtypes_lib.float32, ())
dequeue_op = q_empty.dequeue()
dequeue_many_op = q_empty.dequeue_many(1)
q_full = data_flow_ops.FIFOQueue(5, dtypes_lib.float32)
sess.run(q_full.enqueue_many(([1.0, 2.0, 3.0, 4.0, 5.0],)))
enqueue_op = q_full.enqueue((6.0,))
enqueue_many_op = q_full.enqueue_many(([6.0],))
threads = [
self.checkedThread(
self._blockingDequeue, args=(sess, dequeue_op)),
self.checkedThread(
self._blockingDequeueMany, args=(sess, dequeue_many_op)),
self.checkedThread(
self._blockingEnqueue, args=(sess, enqueue_op)),
self.checkedThread(
self._blockingEnqueueMany, args=(sess, enqueue_many_op))
]
for t in threads:
t.start()
time.sleep(0.1)
sess.close() # Will cancel the blocked operations.
for t in threads:
t.join()
def testBigEnqueueMany(self):
with self.cached_session() as sess:
q = data_flow_ops.FIFOQueue(5, dtypes_lib.int32, ((),))
elem = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
enq = q.enqueue_many((elem,))
deq = q.dequeue()
size_op = q.size()
enq_done = []
def blocking_enqueue():
enq_done.append(False)
# This will fill the queue and then block until enough dequeues happen.
sess.run(enq)
enq_done.append(True)
thread = self.checkedThread(target=blocking_enqueue)
thread.start()
# The enqueue should start and then block.
results = []
results.append(deq.eval()) # Will only complete after the enqueue starts.
self.assertEqual(len(enq_done), 1)
self.assertEqual(sess.run(size_op), 5)
for _ in range(3):
results.append(deq.eval())
time.sleep(0.1)
self.assertEqual(len(enq_done), 1)
self.assertEqual(sess.run(size_op), 5)
# This dequeue will unblock the thread.
results.append(deq.eval())
time.sleep(0.1)
self.assertEqual(len(enq_done), 2)
thread.join()
for i in range(5):
self.assertEqual(size_op.eval(), 5 - i)
results.append(deq.eval())
self.assertEqual(size_op.eval(), 5 - i - 1)
self.assertAllEqual(elem, results)
def testBigDequeueMany(self):
with self.cached_session() as sess:
q = data_flow_ops.FIFOQueue(2, dtypes_lib.int32, ((),))
elem = np.arange(4, dtype=np.int32)
enq_list = [q.enqueue((e,)) for e in elem]
deq = q.dequeue_many(4)
results = []
def blocking_dequeue():
# Will only complete after 4 enqueues complete.
results.extend(sess.run(deq))
thread = self.checkedThread(target=blocking_dequeue)
thread.start()
# The dequeue should start and then block.
for enq in enq_list:
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
self.assertEqual(len(results), 0)
sess.run(enq)
# Enough enqueued to unblock the dequeue
thread.join()
self.assertAllEqual(elem, results)
def testDtypes(self):
with self.cached_session() as sess:
dtypes = [
dtypes_lib.float32, dtypes_lib.float64, dtypes_lib.int32,
dtypes_lib.uint8, dtypes_lib.int16, dtypes_lib.int8, dtypes_lib.int64,
dtypes_lib.uint16, dtypes_lib.bool, dtypes_lib.complex64,
dtypes_lib.complex128
]
shape = (32, 4, 128)
q = data_flow_ops.FIFOQueue(32, dtypes, [shape[1:]] * len(dtypes))
input_tuple = []
for dtype in dtypes:
np_dtype = dtype.as_numpy_dtype
np_array = np.random.randint(-10, 10, shape)
if dtype == dtypes_lib.bool:
np_array = np_array > 0
elif dtype in (dtypes_lib.complex64, dtypes_lib.complex128):
np_array = np.sqrt(np_array.astype(np_dtype))
else:
np_array = np_array.astype(np_dtype)
input_tuple.append(np_array)
q.enqueue_many(input_tuple).run()
output_tuple_t = q.dequeue_many(32)
output_tuple = sess.run(output_tuple_t)
for (input_elem, output_elem) in zip(input_tuple, output_tuple):
self.assertAllEqual(input_elem, output_elem)
def testDequeueEnqueueFail(self):
with self.cached_session() as session:
q = data_flow_ops.FIFOQueue(10, [dtypes_lib.int32], shapes=[()])
a = q.dequeue()
b = control_flow_ops.Assert(False, ["Before enqueue"])
with ops.control_dependencies([b]):
c = q.enqueue(33)
with self.assertRaisesWithPredicateMatch(
errors_impl.InvalidArgumentError,
lambda e: "Before enqueue" in str(e)):
session.run([a, c])
class FIFOQueueDictTest(test.TestCase):
def testConstructor(self):
with ops.Graph().as_default():
q = data_flow_ops.FIFOQueue(
5, (dtypes_lib.int32, dtypes_lib.float32),
names=("i", "j"),
shared_name="foo",
name="Q")
self.assertTrue(isinstance(q.queue_ref, ops.Tensor))
self.assertProtoEquals("""
name:'Q' op:'FIFOQueueV2'
attr { key: 'component_types' value { list {
type: DT_INT32 type : DT_FLOAT
} } }
attr { key: 'shapes' value { list {} } }
attr { key: 'capacity' value { i: 5 } }
attr { key: 'container' value { s: '' } }
attr { key: 'shared_name' value { s: 'foo' } }
""", q.queue_ref.op.node_def)
self.assertEqual(["i", "j"], q.names)
def testConstructorWithShapes(self):
with ops.Graph().as_default():
q = data_flow_ops.FIFOQueue(
5, (dtypes_lib.int32, dtypes_lib.float32),
names=("i", "f"),
shapes=(tensor_shape.TensorShape([1, 1, 2, 3]),
tensor_shape.TensorShape([5, 8])),
name="Q")
self.assertTrue(isinstance(q.queue_ref, ops.Tensor))
self.assertProtoEquals("""
name:'Q' op:'FIFOQueueV2'
attr { key: 'component_types' value { list {
type: DT_INT32 type : DT_FLOAT
} } }
attr { key: 'shapes' value { list {
shape { dim { size: 1 }
dim { size: 1 }
dim { size: 2 }
dim { size: 3 } }
shape { dim { size: 5 }
dim { size: 8 } }
} } }
attr { key: 'capacity' value { i: 5 } }
attr { key: 'container' value { s: '' } }
attr { key: 'shared_name' value { s: '' } }
""", q.queue_ref.op.node_def)
self.assertEqual(["i", "f"], q.names)
def testEnqueueDequeueOneComponent(self):
with self.cached_session() as sess:
q = data_flow_ops.FIFOQueue(
10, dtypes_lib.float32, shapes=((),), names="f")
# Verify that enqueue() checks that when using names we must enqueue a
# dictionary.
with self.assertRaisesRegexp(ValueError, "enqueue a dictionary"):
enqueue_op = q.enqueue(10.0)
with self.assertRaisesRegexp(ValueError, "enqueue a dictionary"):
enqueue_op = q.enqueue((10.0,))
# The dictionary keys must match the queue component names.
with self.assertRaisesRegexp(ValueError, "match names of Queue"):
enqueue_op = q.enqueue({})
with self.assertRaisesRegexp(ValueError, "match names of Queue"):
enqueue_op = q.enqueue({"x": 12})
with self.assertRaisesRegexp(ValueError, "match names of Queue"):
enqueue_op = q.enqueue({"f": 10.0, "s": "aa"})
enqueue_op = q.enqueue({"f": 10.0})
enqueue_op2 = q.enqueue({"f": 20.0})
enqueue_op3 = q.enqueue({"f": 30.0})
# Verify that enqueue_many() checks that when using names we must enqueue
# a dictionary.
with self.assertRaisesRegexp(ValueError, "enqueue a dictionary"):
enqueue_op4 = q.enqueue_many([40.0, 50.0])
# The dictionary keys must match the queue component names.
with self.assertRaisesRegexp(ValueError, "match names of Queue"):
enqueue_op4 = q.enqueue_many({})
with self.assertRaisesRegexp(ValueError, "match names of Queue"):
enqueue_op4 = q.enqueue_many({"x": 12})
with self.assertRaisesRegexp(ValueError, "match names of Queue"):
enqueue_op4 = q.enqueue_many({"f": [40.0, 50.0], "s": ["aa", "bb"]})
enqueue_op4 = q.enqueue_many({"f": [40.0, 50.0]})
dequeue = q.dequeue()
dequeue_2 = q.dequeue_many(2)
sess.run(enqueue_op)
sess.run(enqueue_op2)
sess.run(enqueue_op3)
sess.run(enqueue_op4)
f = sess.run(dequeue["f"])
self.assertEqual(10.0, f)
f = sess.run(dequeue_2["f"])
self.assertEqual([20.0, 30.0], list(f))
f = sess.run(dequeue_2["f"])
self.assertEqual([40.0, 50.0], list(f))
def testEnqueueDequeueMultipleComponent(self):
with self.cached_session() as sess:
q = data_flow_ops.FIFOQueue(
10, (dtypes_lib.float32, dtypes_lib.int32, dtypes_lib.string),
shapes=((), (), ()),
names=("f", "i", "s"))
# Verify that enqueue() checks that when using names we must enqueue a
# dictionary.
with self.assertRaisesRegexp(ValueError, "enqueue a dictionary"):
enqueue_op = q.enqueue((10.0, 123, "aa"))
# The dictionary keys must match the queue component names.
with self.assertRaisesRegexp(ValueError, "match names of Queue"):
enqueue_op = q.enqueue({})
with self.assertRaisesRegexp(ValueError, "match names of Queue"):
enqueue_op = q.enqueue({"x": 10.0})
with self.assertRaisesRegexp(ValueError, "match names of Queue"):
enqueue_op = q.enqueue({"i": 12, "s": "aa"})
with self.assertRaisesRegexp(ValueError, "match names of Queue"):
enqueue_op = q.enqueue({"i": 123, "s": "aa", "f": 10.0, "x": 10.0})
enqueue_op = q.enqueue({"i": 123, "s": "aa", "f": 10.0})
enqueue_op2 = q.enqueue({"i": 124, "s": "bb", "f": 20.0})
enqueue_op3 = q.enqueue({"i": 125, "s": "cc", "f": 30.0})
# Verify that enqueue_many() checks that when using names we must enqueue
# a dictionary.
with self.assertRaisesRegexp(ValueError, "enqueue a dictionary"):
enqueue_op4 = q.enqueue_many(([40.0, 50.0], [126, 127], ["dd", "ee"]))
# The dictionary keys must match the queue component names.
with self.assertRaisesRegexp(ValueError, "match names of Queue"):
enqueue_op4 = q.enqueue_many({})
with self.assertRaisesRegexp(ValueError, "match names of Queue"):
enqueue_op4 = q.enqueue_many({"x": [10.0, 20.0]})
with self.assertRaisesRegexp(ValueError, "match names of Queue"):
enqueue_op4 = q.enqueue_many({"i": [12, 12], "s": ["aa", "bb"]})
with self.assertRaisesRegexp(ValueError, "match names of Queue"):
enqueue_op4 = q.enqueue_many({
"f": [40.0, 50.0],
"i": [126, 127],
"s": ["dd", "ee"],
"x": [1, 2]
})
enqueue_op4 = q.enqueue_many({
"f": [40.0, 50.0],
"i": [126, 127],
"s": ["dd", "ee"]
})
dequeue = q.dequeue()
dequeue_2 = q.dequeue_many(2)
sess.run(enqueue_op)
sess.run(enqueue_op2)
sess.run(enqueue_op3)
sess.run(enqueue_op4)
i, f, s = sess.run([dequeue["i"], dequeue["f"], dequeue["s"]])
self.assertEqual(123, i)
self.assertEqual(10.0, f)
self.assertEqual(compat.as_bytes("aa"), s)
i, f, s = sess.run([dequeue_2["i"], dequeue_2["f"], dequeue_2["s"]])
self.assertEqual([124, 125], list(i))
      self.assertEqual([20.0, 30.0], list(f))
      self.assertEqual([compat.as_bytes("bb"), compat.as_bytes("cc")], list(s))
i, f, s = sess.run([dequeue_2["i"], dequeue_2["f"], dequeue_2["s"]])
self.assertEqual([126, 127], list(i))
      self.assertEqual([40.0, 50.0], list(f))
      self.assertEqual([compat.as_bytes("dd"), compat.as_bytes("ee")], list(s))
class FIFOQueueWithTimeoutTest(test.TestCase):
def testDequeueWithTimeout(self):
with self.session(
config=config_pb2.ConfigProto(operation_timeout_in_ms=20)) as sess:
q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32)
self.assertEqual(
compat.as_bytes(""), q.queue_ref.op.get_attr("container"))
dequeued_t = q.dequeue()
# Intentionally do not run any enqueue_ops so that dequeue will block
# until operation_timeout_in_ms.
with self.assertRaisesRegexp(errors_impl.DeadlineExceededError,
"Timed out waiting for notification"):
sess.run(dequeued_t)
def testReusableAfterTimeout(self):
with self.cached_session() as sess:
q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32)
dequeued_t = q.dequeue()
enqueue_op = q.enqueue(37)
with self.assertRaisesRegexp(errors_impl.DeadlineExceededError,
"Timed out waiting for notification"):
sess.run(dequeued_t, options=config_pb2.RunOptions(timeout_in_ms=10))
with self.assertRaisesRegexp(errors_impl.DeadlineExceededError,
"Timed out waiting for notification"):
sess.run(dequeued_t, options=config_pb2.RunOptions(timeout_in_ms=10))
sess.run(enqueue_op)
self.assertEqual(37, sess.run(dequeued_t))
class QueueContainerTest(test.TestCase):
def testContainer(self):
with ops.Graph().as_default():
with ops.container("test"):
q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32)
self.assertEqual(
compat.as_bytes("test"), q.queue_ref.op.get_attr("container"))
class FIFOQueueBenchmark(test.Benchmark):
"""Benchmark FIFOQueue operations."""
def _build_graph(self):
"""Builds a graph that enqueues and dequeues a single float.
Returns:
A tuple with the graph init tensor and graph output tensor.
"""
q = data_flow_ops.FIFOQueue(1, "float")
init = q.enqueue(1.0)
x = q.dequeue()
q_inc = q.enqueue(x + 1)
return init, q_inc
# TODO(suharshs): Add benchmarks for:
# - different capacities of the queue
# - various sizes of tensors
# - enqueue_many, dequeue_many
def _run(self, num_iters):
"""Benchmarks enqueueing and dequeueing from a FIFOQueue.
Args:
num_iters: The number of iterations to run.
Returns:
The duration of the run in seconds.
"""
graph = ops.Graph()
with graph.as_default():
init, output = self._build_graph()
with session_lib.Session(graph=graph) as session:
init.run()
_ = session.run(output) # warm up.
start_time = time.time()
for _ in range(num_iters):
_ = session.run(output)
duration = time.time() - start_time
print("%f secs per enqueue-dequeue" % (duration / num_iters))
self.report_benchmark(
name="fifo_queue", iters=num_iters, wall_time=duration / num_iters)
return duration
if __name__ == "__main__":
test.main()
|
{
"content_hash": "8bc49745d215abf93c0f8f18b10fe521",
"timestamp": "",
"source": "github",
"line_count": 1663,
"max_line_length": 80,
"avg_line_length": 36.37101623571858,
"alnum_prop": 0.6112755228569067,
"repo_name": "seanli9jan/tensorflow",
"id": "8961c4b13c25269671fdc16fc425516d01970892",
"size": "61174",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tensorflow/python/kernel_tests/fifo_queue_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "3301"
},
{
"name": "Batchfile",
"bytes": "10132"
},
{
"name": "C",
"bytes": "446293"
},
{
"name": "C#",
"bytes": "8446"
},
{
"name": "C++",
"bytes": "50950243"
},
{
"name": "CMake",
"bytes": "198845"
},
{
"name": "Dockerfile",
"bytes": "36908"
},
{
"name": "Go",
"bytes": "1285854"
},
{
"name": "HTML",
"bytes": "4681865"
},
{
"name": "Java",
"bytes": "869263"
},
{
"name": "Jupyter Notebook",
"bytes": "2611125"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "62216"
},
{
"name": "Objective-C",
"bytes": "15634"
},
{
"name": "Objective-C++",
"bytes": "101475"
},
{
"name": "PHP",
"bytes": "5191"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "PureBasic",
"bytes": "25356"
},
{
"name": "Python",
"bytes": "40335927"
},
{
"name": "RobotFramework",
"bytes": "891"
},
{
"name": "Ruby",
"bytes": "553"
},
{
"name": "Shell",
"bytes": "487251"
},
{
"name": "Smarty",
"bytes": "6976"
}
],
"symlink_target": ""
}
|
import numpy
import apogee.tools.read as apread
_DATA= apread.rcsample() #such that we can re-use it in different tests
def test_int64():
#Test that there aren't 64-bit integers
for key in _DATA.dtype.names:
assert not numpy.issubdtype(_DATA[key].dtype,numpy.int64), "Tag '%s' in the RC catalog is a 64-bit signed integer that might give problems for some readers" % key
return None
|
{
"content_hash": "0cdbf75151dfdec6f3c8a7b55eafe593",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 170,
"avg_line_length": 44.888888888888886,
"alnum_prop": 0.7227722772277227,
"repo_name": "jobovy/apogee",
"id": "5f89bce942ba461880c013e7d41e568b328e5d5c",
"size": "404",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "apogee/test/test_rcCat.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Awk",
"bytes": "2808"
},
{
"name": "Python",
"bytes": "712143"
}
],
"symlink_target": ""
}
|
from ropper import RopperService
# not all options need to be given
options = {'color' : False, # if gadgets are printed, use colored output: default: False
'badbytes': '00', # bad bytes which should not be in addresses or ropchains; default: ''
'all' : False, # Show all gadgets, this means to not remove double gadgets; default: False
'inst_count' : 6, # Number of instructions in a gadget; default: 6
'type' : 'all', # rop, jop, sys, all; default: all
'detailed' : False} # if gadgets are printed, use detailed output; default: False
rs = RopperService(options)
##### change options ######
rs.options.color = True
rs.options.badbytes = '00'
rs.options.badbytes = ''
rs.options.all = True
##### open binaries ######
# it is possible to open multiple files
rs.addFile('test-binaries/ls-x86')
rs.addFile('ls', bytes=open('test-binaries/ls-x86','rb').read()) # other possibility
rs.addFile('ls_raw', bytes=open('test-binaries/ls-x86','rb').read(), raw=True, arch='x86')
##### close binaries ######
rs.removeFile('ls')
rs.removeFile('ls_raw')
# Set architecture of a binary, so it is possible to look for gadgets for a different architecture
# It is useful for ARM if you want to look for ARM gadgets or Thumb gadgets
# Or if you opened a raw file
ls = 'test-binaries/ls-x86'
rs.setArchitectureFor(name=ls, arch='x86')
rs.setArchitectureFor(name=ls, arch='x86_64')
rs.setArchitectureFor(name=ls, arch='ARM')
rs.setArchitectureFor(name=ls, arch='ARMTHUMB')
rs.setArchitectureFor(name=ls, arch='ARM64')
rs.setArchitectureFor(name=ls, arch='MIPS')
rs.setArchitectureFor(name=ls, arch='MIPS64')
rs.setArchitectureFor(name=ls, arch='PPC')
rs.setArchitectureFor(name=ls, arch='PPC64')
rs.setArchitectureFor(name=ls, arch='SPARC64')
rs.setArchitectureFor(name=ls, arch='x86')
##### load gadgets ######
# load gadgets for all opened files
rs.loadGadgetsFor()
# load gadgets for only one opened file
ls = 'test-binaries/ls-x86'
rs.loadGadgetsFor(name=ls)
# change gadget type
rs.options.type = 'jop'
rs.loadGadgetsFor()
rs.options.type = 'rop'
rs.loadGadgetsFor()
# change instruction count
rs.options.inst_count = 10
rs.loadGadgetsFor()
##### print gadgets #######
rs.printGadgetsFor() # print all gadgets
rs.printGadgetsFor(name=ls)
##### Get gadgets ######
gadgets = rs.getFileFor(name=ls).gadgets
##### search pop pop ret ######
pprs = rs.searchPopPopRet(name=ls) # looks for ppr only in 'test-binaries/ls-x86'
pprs = rs.searchPopPopRet() # looks for ppr in all opened files
for file, ppr in pprs.items():
for p in ppr:
print(p)
##### load jmp reg ######
jmp_regs = rs.searchJmpReg(name=ls, regs=['esp', 'eax']) # looks for jmp reg only in 'test-binaries/ls-x86'
jmp_regs = rs.searchJmpReg(regs=['esp', 'eax'])
jmp_regs = rs.searchJmpReg() # looks for jmp esp in all opened files
for file, jmp_reg in jmp_regs.items():
for j in jmp_reg:
print(j)
##### search opcode ######
ls = 'test-binaries/ls-x86'
gadgets_dict = rs.searchOpcode(opcode='ffe4', name=ls)
gadgets_dict = rs.searchOpcode(opcode='ffe?')
gadgets_dict = rs.searchOpcode(opcode='??e4')
for file, gadgets in gadgets_dict.items():
for g in gadgets:
print(g)
##### search instructions ######
ls = 'test-binaries/ls-x86'
for file, gadget in rs.search(search='mov e?x', name=ls):
print(file, gadget)
for file, gadget in rs.search(search='mov [e?x%]'):
print(file, gadget)
result_dict = rs.searchdict(search='mov eax')
for file, gadgets in result_dict.items():
print(file)
for gadget in gadgets:
print(gadget)
##### assemble instructions ######
hex_string = rs.asm('jmp esp')
print('"jmp esp" assembled to hex string =', hex_string)
raw_bytes = rs.asm('jmp esp', format='raw')
print('"jmp esp" assembled to raw bytes =', raw_bytes)
string = rs.asm('jmp esp', format='string')
print('"jmp esp" assembled to string =', string)
arm_bytes = rs.asm('bx sp', arch='ARM')
print('"bx sp" assembled to hex string =', arm_bytes)
##### disassemble bytes #######
arm_instructions = rs.disasm(arm_bytes, arch='ARM')
print(arm_bytes, 'disassembled to "%s"' % arm_instructions)
# Change the imagebase, this also change the imagebase for all loaded gadgets of this binary
rs.setImageBaseFor(name=ls, imagebase=0x0)
# reset image base
rs.setImageBaseFor(name=ls, imagebase=None)
gadgets = rs.getFileFor(name=ls).gadgets
# gadget address
print(hex(gadgets[0].address))
# get instruction bytes of gadget as a hex string (portable across Python 2/3)
import binascii
print(binascii.hexlify(bytes(gadgets[0].bytes)).decode())
# remove all gadgets containing bad bytes in address
rs.options.badbytes = '000a0d' # gadgets are filtered automatically
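# Sanity-check sketch (illustrative, not part of the original sample): after
# setting badbytes, re-fetch the gadgets and verify that no gadget address
# contains one of the bad bytes. Assumes 32-bit addresses for the x86 binary.
gadgets = rs.getFileFor(name=ls).gadgets
bad_bytes = {0x00, 0x0a, 0x0d}
for g in gadgets:
    addr_bytes = [(g.address >> shift) & 0xff for shift in (0, 8, 16, 24)]
    assert not bad_bytes.intersection(addr_bytes)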
|
{
"content_hash": "21985462a374882b6cddf932d5d0067c",
"timestamp": "",
"source": "github",
"line_count": 145,
"max_line_length": 107,
"avg_line_length": 32.48275862068966,
"alnum_prop": 0.6817409766454352,
"repo_name": "sashs/Ropper",
"id": "ca723a2a353bd649970c4d33eb0cbddfead2f98c",
"size": "4732",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sample.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Dockerfile",
"bytes": "704"
},
{
"name": "Python",
"bytes": "401764"
},
{
"name": "Shell",
"bytes": "46"
}
],
"symlink_target": ""
}
|
from collections import OrderedDict
from fnmatch import fnmatch
from glob import glob
from tabulate import tabulate
import os.path as osp, itertools,numpy as np
from control4.algs.alg_params import str2numlist
from control4.bench.diagnostic_common import disp_dict_as_2d_array,disp_dict_as_3d_array,compute_mean_std_across_runs,load_hdfs_as_dataframe
from control4.misc.collection_utils import concatenate
from control4.misc.console_utils import check_output_and_print
from pandas.stats.moments import ewma
def disp_info(df):
stat_names = df.stat_name.unique()
script_names = df.script_name.unique()
statname2scriptnames = {stat_name:['']*len(script_names) for stat_name in stat_names}
for (i_script,(script_name,script_grp)) in enumerate(df.groupby("script_name")):
for stat_name in script_grp.stat_name.unique():
statname2scriptnames[stat_name][i_script] = 'x'
print tabulate([[stat_name]+li for (stat_name,li) in statname2scriptnames.items()], headers = map(str,range(len(script_names))))
print "--------------------"
print " Script Names "
for (i,script_name) in enumerate(script_names):
print "%i: %s"%(i,script_name)
def filter_dataframe(df, col, desc):
if "," in desc:
scriptnames = desc.split(",")
filtfn = lambda s: s in scriptnames
elif "*" in desc:
filtfn = lambda s: fnmatch(s, desc)
else:
filtfn = lambda s: s==desc
vals = filter(filtfn, df[col].unique() )
mask = [item in vals for item in df[col]]
return df[mask]
def main():
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("dir")
parser.add_argument("--disp_info",action="store_true")
parser.add_argument("--stat_names",help="comma-separated list of diagnostic stats you want to display",default="")
parser.add_argument("--avg_runs",action="store_true")
parser.add_argument("--output",choices=["table","lc"],default="table")
parser.add_argument("--table_style",choices=["0","1","2","3"],help="0: script/stat. 1: cfg+run/test+stat. 2: stat/cfg+run/test, 3: cfg+run/stat",default="0")
parser.add_argument("--table_stat",choices=["final","mean"],default="final")
parser.add_argument("--plot_mode",choices=["show","save"],default="show")
parser.add_argument("--versions",action="store_true")
parser.add_argument("--n_cfg",type=int)
parser.add_argument("--label_lines_by",choices=["script","cfg"],default="script")
parser.add_argument("--save_plot_to")
parser.add_argument("--script_include")
parser.add_argument("--cfg_include")
parser.add_argument("--test_include")
parser.add_argument("--smooth_span",type=int,default=0)
args = parser.parse_args()
h5files = []
for dirname in args.dir.split(","):
h5files.extend( glob(osp.join(dirname,"*.h5")) )
if len(h5files) == 0:
h5files = glob(osp.join(args.dir,"*/*.h5"))
assert len(h5files) > 0
assert args.disp_info or len(args.stat_names)>0
if args.versions:
gitrevlist = [line[:7] for line in check_output_and_print("cd $CTRL_ROOT && git rev-list HEAD").split("\n")]
cfg_sortkey = lambda cfg: gitrevlist.index(cfg[:7])
h5files = concatenate([glob(osp.join(args.dir, "%s/*.h5"%rev)) for rev in sorted(gitrevlist, key=cfg_sortkey)[:args.n_cfg]])
df = load_hdfs_as_dataframe(h5files)
for prefix in ["test","cfg","script"]:
filter_pat = args.__dict__["%s_include"%prefix]
if filter_pat:
df = filter_dataframe(df, "%s_name"%prefix,filter_pat)
if args.disp_info:
disp_info(df)
else:
stat_names = str2numlist(args.stat_names,str)
colors = 'bgrcmyk'
styles = ['-x','-','--','-.']
linestyles = [color+style for style in styles for color in colors ]
if args.avg_runs:
df = compute_mean_std_across_runs(df)
if args.output == "lc":
import matplotlib.pyplot as plt
# Plotting 6D data! figure=test, axis=stat, cfg=linecolor, run=<nothing>, xaxis=time, yaxis=value
for (test_name,test_name_grp) in df.groupby("test_name"):
stat_names_here = list(set(test_name_grp.stat_name.unique()).intersection(stat_names))
_, axarr = plt.subplots(len(stat_names_here), sharex=True)
if not isinstance(axarr,np.ndarray):
axarr = [axarr]
# plt.title(test_name)
for (stat_name,stat_name_grp) in test_name_grp.groupby("stat_name"):
if stat_name not in stat_names: continue
i_stat = stat_names_here.index(stat_name)
axarr[i_stat].set_title("%s: %s"%(test_name,stat_name))
cfg2line = OrderedDict()
ax = axarr[i_stat]
it = stat_name_grp.groupby("cfg_name" if args.label_lines_by == "cfg" else "script_name")
if args.versions: it = sorted(it, key=lambda (cfg_name,_) : cfg_sortkey(cfg_name))
if args.n_cfg: it = itertools.islice(it,0,args.n_cfg)
for (i_cfg,(cfg_name,cfg_name_group)) in enumerate(it):
linestyle = linestyles[i_cfg]
for timeseries in cfg_name_group["timeseries"]:
if args.smooth_span > 0: timeseries = ewma(timeseries,span=args.smooth_span)
line,=ax.plot(timeseries,linestyle)
if args.avg_runs:
assert len(cfg_name_group)==1
stderr = cfg_name_group["timeseries_stderr"].irow(0)
ax.fill_between(np.arange(len(timeseries)), timeseries-stderr,timeseries+stderr,color=linestyle[0],alpha=.2)
cfg2line[cfg_name]=line
ax.legend(cfg2line.values(),cfg2line.keys(),prop={'size':8}).draggable()
if args.plot_mode == "show":
plt.show()
else:
plt.savefig(args.save_plot_to)
elif args.output == "table":
data = {}
cfg_run_repr = lambda cfg_name,i_run: cfg_name if args.avg_runs else "%s-RUN%i"%(cfg_name,i_run)
if args.table_style == "0":
keyfunc = lambda cfg_name,i_run,stat_name,test_name,script_name : (cfg_run_repr(script_name,i_run),stat_name)
table_dim = 2
elif args.table_style == "1":
keyfunc = lambda cfg_name,i_run,stat_name,test_name,script_name : (cfg_run_repr(cfg_name,i_run),"%s-%s"%(test_name,stat_name))
table_dim = 2
elif args.table_style == "2":
table_dim = 3
keyfunc = lambda cfg_name,i_run,stat_name,test_name,script_name : (stat_name,cfg_run_repr(cfg_name,i_run),test_name)
elif args.table_style == "3":
table_dim = 2
keyfunc = lambda cfg_name,i_run,stat_name,test_name,script_name : (cfg_run_repr(cfg_name,i_run),stat_name)
else:
raise RuntimeError
it = df.groupby(['cfg_name','i_run','stat_name','test_name','script_name'])
for ((cfg_name,i_run,stat_name,test_name,script_name),grp) in it:
if args.versions: cfg_name="%.4i-%s"%(cfg_sortkey(cfg_name),cfg_name)
if stat_name in stat_names:
assert len(grp)==1
key = keyfunc(cfg_name,i_run,stat_name,test_name,script_name)
data[key] = (grp["timeseries"].irow(0).mean() if args.table_stat == "mean" else grp["timeseries"].irow(0)[-1])
if table_dim == 2:
disp_dict_as_2d_array(data,use_numeric_keys=True)
elif table_dim == 3:
disp_dict_as_3d_array(data,use_numeric_keys=True)
if __name__ == "__main__":
main()
|
{
"content_hash": "13c4c9673aaa3d2a20a6b2ff7351905c",
"timestamp": "",
"source": "github",
"line_count": 166,
"max_line_length": 161,
"avg_line_length": 48.21084337349398,
"alnum_prop": 0.5786580032487817,
"repo_name": "SFPD/rlreloaded",
"id": "022f8a5b74f3328ae5eee343eb285d7a51dd48a0",
"size": "8100",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scripts/bench/analyze_experiment.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "753"
},
{
"name": "C++",
"bytes": "88527"
},
{
"name": "CMake",
"bytes": "33134"
},
{
"name": "Python",
"bytes": "478983"
},
{
"name": "Shell",
"bytes": "953"
}
],
"symlink_target": ""
}
|
from pygeocoder import Geocoder
import us, csv, json
WRITE = 'wb'
READ = 'rb'
#corpus = json.load(open('bieber-raw-test.json',READ))
corpus = json.load(open('#birthday-birthday-20140626-082517.json',READ))
#corpus = json.load(open('sxsw-SXSW-#SXSW-#sxsw-20140308-001535.json',READ))
def get_location(tweet):
if tweet['coordinates']:
        # Twitter's GeoJSON stores coordinates as [longitude, latitude]
        lon, lat = tuple(tweet['coordinates']['coordinates'])
        try:
            location = Geocoder.reverse_geocode(lat, lon)
            # the country field, not the state field, identifies US tweets
            if location.country == 'United States':
return us.states.lookup(location.administrative_area_level_1).abbr.strip()
except:
pass
elif tweet['place']:
return tweet['place']['full_name'].split(',')[-1].strip()
print 'Hi'
locations = [get_location(tweet) for tweet in corpus]
print filter(None,locations)
# get_location returns state abbreviations, so count by abbr and key by full name
prevalence = {state.name: locations.count(state.abbr) for state in us.states.STATES}
print prevalence
with open('prevalence-birthday.csv', 'wb') as f:
print>>f,'name,prevalence'
for state,count in prevalence.items():
print>>f,'%s,%d'%(state,count)
|
{
"content_hash": "95d565b8b71d8a7c2ece51f2e3473d0b",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 102,
"avg_line_length": 34.96774193548387,
"alnum_prop": 0.7149446494464945,
"repo_name": "mac389/Maui",
"id": "059764502eb02a5c3edb8adbbbdc70ed21f28c05",
"size": "1084",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "coordinates_states.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "24484"
},
{
"name": "Shell",
"bytes": "419"
}
],
"symlink_target": ""
}
|
"""
A Python class implementing KBHIT, the standard keyboard-interrupt poller.
Works transparently on Windows and Posix (Linux, Mac OS X). Doesn't work
with IDLE.
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as
published by the Free Software Foundation, either version 3 of the
License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
"""
import os
# Windows
if os.name == 'nt':
# noinspection PyUnresolvedReferences
import msvcrt
# Posix (Linux, OS X)
else:
import sys
import termios
import atexit
from select import select
class KBHit:
def __init__(self):
        '''Creates a KBHit object for polling and reading single keypresses.
        '''
if os.name == 'nt':
pass
else:
# Save the terminal settings
self.fd = sys.stdin.fileno()
self.new_term = termios.tcgetattr(self.fd)
self.old_term = termios.tcgetattr(self.fd)
# New terminal setting unbuffered
self.new_term[3] = (self.new_term[3] & ~termios.ICANON & ~termios.ECHO)
termios.tcsetattr(self.fd, termios.TCSAFLUSH, self.new_term)
# Support normal-terminal reset at exit
atexit.register(self.set_normal_term)
def set_normal_term(self):
''' Resets to normal terminal. On Windows this is a no-op.
'''
if os.name == 'nt':
pass
else:
termios.tcsetattr(self.fd, termios.TCSAFLUSH, self.old_term)
def getch(self):
''' Returns a keyboard character after kbhit() has been called.
Should not be called in the same program as getarrow().
'''
s = ''
if os.name == 'nt':
return msvcrt.getch().decode('utf-8')
else:
return sys.stdin.read(1)
def getarrow(self):
''' Returns an arrow-key code after kbhit() has been called. Codes are
0 : up
1 : right
2 : down
3 : left
Should not be called in the same program as getch().
'''
        if os.name == 'nt':
            msvcrt.getch()  # skip the 0xE0 prefix byte
            c = msvcrt.getch()
            vals = [72, 77, 80, 75]
        else:
            c = sys.stdin.read(3)[2]
            vals = [65, 67, 66, 68]
        if isinstance(c, bytes):  # msvcrt.getch() returns bytes; stdin.read() returns str
            c = c.decode('utf-8')
        return vals.index(ord(c))
def kbhit(self):
''' Returns True if keyboard character was hit, False otherwise.
'''
if os.name == 'nt':
return msvcrt.kbhit()
else:
dr, dw, de = select([sys.stdin], [], [], 0)
return dr != []
# Test
if __name__ == "__main__":
kb = KBHit()
print('Hit any key, or ESC to exit')
while True:
if kb.kbhit():
c = kb.getch()
if ord(c) == 27: # ESC
break
print(c)
kb.set_normal_term()
|
{
"content_hash": "894c47132a412282da4363fc33a79681",
"timestamp": "",
"source": "github",
"line_count": 125,
"max_line_length": 76,
"avg_line_length": 21.8,
"alnum_prop": 0.6667889908256881,
"repo_name": "kevinkahn/softconsole",
"id": "4df204e677f9ed09efcea6c4f88634861c947c6f",
"size": "2747",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "deprecated/statusinput.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Euphoria",
"bytes": "267"
},
{
"name": "Python",
"bytes": "839903"
},
{
"name": "Shell",
"bytes": "101927"
}
],
"symlink_target": ""
}
|
import __main__
import argparse
import code
import os
import sys
def add_config_parameter(parser):
parser.add_argument(
'-c', '--config', dest='config_file', action='store', type=str,
help='custom config file', default=None
)
def load_run_parsers(subparsers):
run_parser = subparsers.add_parser(
'run', help='run application locally'
)
run_parser.add_argument(
'-p', '--port', dest='port', action='store', type=str,
help='application port', default='8000'
)
run_parser.add_argument(
'-a', '--address', dest='address', action='store', type=str,
help='application address', default='0.0.0.0'
)
run_parser.add_argument(
'--fake-tasks', action='store_true', help='fake tasks'
)
run_parser.add_argument(
'--fake-tasks-amqp', action='store_true',
help='fake tasks with real AMQP'
)
run_parser.add_argument(
'--keepalive',
action='store_true',
help='run keep alive thread'
)
add_config_parameter(run_parser)
run_parser.add_argument(
'--fake-tasks-tick-count', action='store', type=int,
help='Fake tasks tick count'
)
run_parser.add_argument(
'--fake-tasks-tick-interval', action='store', type=int,
help='Fake tasks tick interval in seconds'
)
run_parser.add_argument(
'--authentication-method', action='store', type=str,
help='Choose authentication type',
choices=['none', 'fake', 'keystone'],
)
def load_db_parsers(subparsers):
subparsers.add_parser(
'syncdb', help='sync application database'
)
subparsers.add_parser(
'dropdb', help='drop application database'
)
# fixtures
loaddata_parser = subparsers.add_parser(
'loaddata', help='load data from fixture'
)
loaddata_parser.add_argument(
'fixture', action='store', help='json fixture to load'
)
dumpdata_parser = subparsers.add_parser(
'dumpdata', help='dump models as fixture'
)
dumpdata_parser.add_argument(
        'model', action='store', help='model name to dump; underscored name '
        'should be used, e.g. network_group for NetworkGroup model'
)
subparsers.add_parser(
'loaddefault',
help='load data from default fixtures '
        '(settings.FIXTURES_TO_UPLOAD)'
)
def load_alembic_parsers(migrate_parser):
alembic_parser = migrate_parser.add_subparsers(
dest="alembic_command",
help='alembic command'
)
for name in ['current', 'history', 'branches']:
parser = alembic_parser.add_parser(name)
for name in ['upgrade', 'downgrade']:
parser = alembic_parser.add_parser(name)
parser.add_argument('--delta', type=int)
parser.add_argument('--sql', action='store_true')
parser.add_argument('revision', nargs='?')
parser = alembic_parser.add_parser('stamp')
parser.add_argument('--sql', action='store_true')
parser.add_argument('revision')
parser = alembic_parser.add_parser('revision')
parser.add_argument('-m', '--message')
parser.add_argument('--autogenerate', action='store_true')
parser.add_argument('--sql', action='store_true')
def load_db_migrate_parsers(subparsers):
migrate_parser = subparsers.add_parser(
'migrate', help='dealing with DB migration'
)
load_alembic_parsers(migrate_parser)
def load_dbshell_parsers(subparsers):
dbshell_parser = subparsers.add_parser(
'dbshell', help='open database shell'
)
add_config_parameter(dbshell_parser)
def load_test_parsers(subparsers):
subparsers.add_parser(
'test', help='run unit tests'
)
def load_shell_parsers(subparsers):
shell_parser = subparsers.add_parser(
'shell', help='open python REPL'
)
add_config_parameter(shell_parser)
def load_settings_parsers(subparsers):
subparsers.add_parser(
'dump_settings', help='dump current settings to YAML'
)
def action_dumpdata(params):
import logging
logging.disable(logging.WARNING)
from nailgun.db.sqlalchemy import fixman
fixman.dump_fixture(params.model)
sys.exit(0)
def action_loaddata(params):
from nailgun.db.sqlalchemy import fixman
from nailgun.logger import logger
logger.info("Uploading fixture...")
with open(params.fixture, "r") as fileobj:
fixman.upload_fixture(fileobj)
logger.info("Done")
def action_loaddefault(params):
from nailgun.db.sqlalchemy import fixman
from nailgun.logger import logger
logger.info("Uploading fixture...")
fixman.upload_fixtures()
logger.info("Done")
def action_syncdb(params):
from nailgun.db import syncdb
from nailgun.logger import logger
logger.info("Syncing database...")
syncdb()
logger.info("Done")
def action_dropdb(params):
from nailgun.db import dropdb
from nailgun.logger import logger
logger.info("Dropping database...")
dropdb()
logger.info("Done")
def action_migrate(params):
from nailgun.db.migration import action_migrate_alembic
action_migrate_alembic(params)
def action_test(params):
from nailgun.logger import logger
from nailgun.unit_test import TestRunner
logger.info("Running tests...")
TestRunner.run()
logger.info("Done")
def action_dbshell(params):
from nailgun.settings import settings
if params.config_file:
settings.update_from_file(params.config_file)
args = ['psql']
env = {}
if settings.DATABASE['passwd']:
env['PGPASSWORD'] = settings.DATABASE['passwd']
if settings.DATABASE['user']:
args += ["-U", settings.DATABASE['user']]
if settings.DATABASE['host']:
args.extend(["-h", settings.DATABASE['host']])
if settings.DATABASE['port']:
args.extend(["-p", str(settings.DATABASE['port'])])
args += [settings.DATABASE['name']]
if os.name == 'nt':
sys.exit(os.system(" ".join(args)))
else:
os.execvpe('psql', args, env)
def action_dump_settings(params):
from nailgun.settings import settings
sys.stdout.write(settings.dump())
def action_shell(params):
from nailgun.db import db
from nailgun.settings import settings
if params.config_file:
settings.update_from_file(params.config_file)
try:
from IPython import embed
embed()
except ImportError:
code.interact(local={'db': db, 'settings': settings})
def action_run(params):
from nailgun.settings import settings
settings.update({
'LISTEN_PORT': int(params.port),
'LISTEN_ADDRESS': params.address,
})
for attr in ['FAKE_TASKS', 'FAKE_TASKS_TICK_COUNT',
'FAKE_TASKS_TICK_INTERVAL', 'FAKE_TASKS_AMQP']:
param = getattr(params, attr.lower())
if param is not None:
settings.update({attr: param})
if params.authentication_method:
auth_method = params.authentication_method
settings.AUTH.update({'AUTHENTICATION_METHOD' : auth_method})
if params.config_file:
settings.update_from_file(params.config_file)
from nailgun.app import appstart
appstart()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
subparsers = parser.add_subparsers(
dest="action", help='actions'
)
load_run_parsers(subparsers)
load_db_parsers(subparsers)
load_db_migrate_parsers(subparsers)
load_dbshell_parsers(subparsers)
load_test_parsers(subparsers)
load_shell_parsers(subparsers)
load_settings_parsers(subparsers)
params, other_params = parser.parse_known_args()
sys.argv.pop(1)
action = getattr(
__main__,
"action_{0}".format(params.action)
)
action(params) if action else parser.print_help()
|
{
"content_hash": "123df350d4fc9be433b69a9904e31508",
"timestamp": "",
"source": "github",
"line_count": 285,
"max_line_length": 76,
"avg_line_length": 27.54736842105263,
"alnum_prop": 0.6428480448350529,
"repo_name": "zhaochao/fuel-web",
"id": "fb78d89a485fb14ecf177e1a51b584b25d30ae53",
"size": "8508",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "nailgun/manage.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "109800"
},
{
"name": "HTML",
"bytes": "16017"
},
{
"name": "JavaScript",
"bytes": "705662"
},
{
"name": "Mako",
"bytes": "1037"
},
{
"name": "Puppet",
"bytes": "282"
},
{
"name": "Python",
"bytes": "3493678"
},
{
"name": "Ruby",
"bytes": "33590"
},
{
"name": "Shell",
"bytes": "26585"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('main', '0024_channel_modified'),
]
operations = [
migrations.AlterField(
model_name='channel',
name='modified',
field=models.DateTimeField(auto_now=True),
),
]
|
{
"content_hash": "3f80fbca16a81cc9747542e7df759c2e",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 54,
"avg_line_length": 21.11111111111111,
"alnum_prop": 0.5947368421052631,
"repo_name": "mozilla/airmozilla",
"id": "7d2e5f66e8c69fc5397ee94a0c911dcb61464979",
"size": "404",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "airmozilla/main/migrations/0025_auto_20160630_2040.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "4527"
},
{
"name": "Brightscript",
"bytes": "67473"
},
{
"name": "CSS",
"bytes": "140471"
},
{
"name": "HTML",
"bytes": "346961"
},
{
"name": "JavaScript",
"bytes": "1277145"
},
{
"name": "Makefile",
"bytes": "12447"
},
{
"name": "Python",
"bytes": "2149189"
},
{
"name": "Shell",
"bytes": "3103"
},
{
"name": "Smarty",
"bytes": "3010"
}
],
"symlink_target": ""
}
|
import argparse
import base64
import os
import random
import struct
import sys
def main():
parser = argparse.ArgumentParser(description='Generate random test cases for the base64 unit test.')
parser.add_argument('--count', default=1000, type=int, help='Number of test cases to generate')
parser.add_argument('--maxSize', default=1000, type=int, help='Maximum (binary) size of each test case')
parser.add_argument('--seed', default=1234, type=int, help='Seed for the RNG.')
parser.add_argument('--output', required=True, type=str, help='Output file')
args = parser.parse_args()
print("Generating {} test cases".format(args.count))
print("Setting the random seed to {}".format(args.seed))
random.seed(args.seed)
testCases = []
for i in range(args.count):
testCaseSize = random.randrange(0, args.maxSize)
testCaseBinary = os.urandom(testCaseSize)
assert(len(testCaseBinary) == testCaseSize)
testCases.append((testCaseBinary, base64.b64encode(testCaseBinary)))
assert(len(testCases) == args.count)
    # Now dump the test data to file.
    # File format:
    # - Two bytes number of test cases (little endian)
    # Then, for each test case:
    # - Two bytes length of the binary string (little endian)
    # - Binary string
    # - Two bytes length of the encoded string (little endian, no null terminator)
    # - Encoded string (no null terminator)
with open(args.output, 'wb') as f:
f.write(struct.pack('<H', len(testCases)))
for t in testCases:
f.write(struct.pack('<H', len(t[0])))
f.write(t[0])
base64Bytes = t[1]
f.write(struct.pack('<H', len(base64Bytes)))
f.write(base64Bytes)
print("Successfully dumped {} test cases.".format(len(testCases)))
if __name__=='__main__':
main()
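# For reference, a minimal reader sketch for the format above (a hypothetical
# helper, not part of this repo); it assumes the layout documented in main().
def read_test_cases(path):
    with open(path, 'rb') as f:
        count, = struct.unpack('<H', f.read(2))
        cases = []
        for _ in range(count):
            binary_len, = struct.unpack('<H', f.read(2))
            binary = f.read(binary_len)
            encoded_len, = struct.unpack('<H', f.read(2))
            encoded = f.read(encoded_len)
            cases.append((binary, encoded))
    return cases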
|
{
"content_hash": "dc067a4c5ce90557fffeff6c9cb70e00",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 108,
"avg_line_length": 42.42857142857143,
"alnum_prop": 0.6487093153759821,
"repo_name": "dbitbox/mcu",
"id": "825773ebce8562c16409cb2ece94e1ea2bcdf300",
"size": "1805",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tests/generate_base64_testcases.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "2818878"
},
{
"name": "C++",
"bytes": "460723"
},
{
"name": "CMake",
"bytes": "836"
},
{
"name": "Python",
"bytes": "1936"
},
{
"name": "Shell",
"bytes": "7358"
}
],
"symlink_target": ""
}
|
import os
import pickle
import numpy as np
import matplotlib.pyplot as pylab
import operator
from multiprocessing import Pool, Manager
import glob
from collections import Counter,defaultdict
from scipy.stats import norm
import logging
def bayes_selection_constnat_optimization(self):
removal_ratio=0.9
likelihood_list=[]
modified_likelihood_list=[]
sgRNA_number=[] # total sgRNA_number
for (tgid,tginst) in self.allgenedict.items():
#probability=tginst.sgrna_probability
likelihood= np.matrix(tginst.loglikelihood.reshape(tginst.nb_count.shape[0],tginst.nb_count.shape[1]))
likelihood_mean= np.matrix(np.mean(likelihood,axis=0))
#modified_likelihood_list+=[likelihood_mean.tolist()[0][i]-probability[i] for i in range(len(probability))]
modified_likelihood_list+=likelihood_mean.tolist()[0]
#probability_list+=probability
likelihood_list+=(likelihood_mean.tolist()[0])
sgRNA_number.append(len(tginst.sgrnaid))
# selection_constant iteration start point
sgRNA_number=[[sgRNA_number,count] for sgRNA_number,count in Counter(sgRNA_number).items()]
sgRNA_number.sort(key=operator.itemgetter(1),reverse=True)
common_sgRNA_number=sgRNA_number[0][0]
modified_likelihood_list.sort(reverse=True)
selection_constant=int(-modified_likelihood_list[int(len(modified_likelihood_list)*removal_ratio)]/(np.log(int(common_sgRNA_number*removal_ratio))-np.log(int(common_sgRNA_number*removal_ratio)-1)))
    # precalculate log tables
#log_list=defaultdict(list)
for i in range(1,max([i[0] for i in sgRNA_number])+1):
self.log_list[i]=[np.log(k+1) for k in range(i)]
# selection constant searching
    constant_percentage={} # to record the percentage of on-target gRNAs given a selection constant
while selection_constant not in list(constant_percentage.keys()):
total_sgRNA=0
total_on_target_sgRNA=0
for (tgid,tginst) in self.allgenedict.items():
likelihood= np.matrix(tginst.loglikelihood.reshape(tginst.nb_count.shape[0],tginst.nb_count.shape[1]))
likelihood_mean= np.matrix(np.mean(likelihood,axis=0)).tolist()
temp=likelihood_mean[0]
#probability=tginst.sgrna_probability
#temp=[temp[i]-probability[i] for i in range(len(temp))]
temp.sort(reverse=True)
temp_accumulate=[np.sum([k for k in temp[:(i+1)]]) for i in range(len(temp))]
modified_penalty=[selection_constant*i for i in self.log_list[len(tginst.sgrnaid)]]
temp_accumulate_log_penalty=[sum(x) for x in zip(temp_accumulate,modified_penalty)]
max_index=[i for i, j in enumerate(temp_accumulate_log_penalty) if j == max(temp_accumulate_log_penalty)]
total_sgRNA+=len(tginst.sgrnaid)
total_on_target_sgRNA+=(max_index[0]+1)
if abs(total_sgRNA-3000)<10:
break
constant_percentage[selection_constant]=float(total_on_target_sgRNA)/total_sgRNA
if float(total_on_target_sgRNA)/total_sgRNA>removal_ratio:
selection_constant-=1
else:
selection_constant+=1
if [i>removal_ratio for i in list(constant_percentage.values())]==[True]*len(list(constant_percentage.values())):
pass
elif [i>removal_ratio for i in list(constant_percentage.values())]==[False]*len(list(constant_percentage.values())):
pass
else:
break
selection_percentage=[[constant,abs(percentage-removal_ratio)] for constant,percentage in constant_percentage.items()]
selection_percentage.sort(key=operator.itemgetter(1))
selection_constant=selection_percentage[0][0]
self.selection_constant=selection_constant
logging.info(selection_constant)
def bayes_selection(tginst,log_list,selection_constant):
#likelihood_mean=np.mean(tginst.loglikelihood[i])
likelihood= np.matrix(tginst.loglikelihood.reshape(tginst.nb_count.shape[0],tginst.nb_count.shape[1]))
likelihood_mean= np.matrix(np.mean(likelihood,axis=0)).tolist()[0]
#temp=[[tginst.sgrnaid[i],likelihood_mean[0][i]-probability[i]] for i in range(len(tginst.sgrnaid))]
#logging.info(tginst.loglikelihood)
#logging.info(likelihood_mean)
#for i in range(len(tginst.sgrnaid)):
# logging.info(tginst.sgrnaid[i])
# logging.info(tginst.loglikelihood[i])
temp=[[tginst.sgrnaid[i],likelihood_mean[i]] for i in range(len(tginst.sgrnaid))]
temp.sort(key=operator.itemgetter(1),reverse=True)
#logging.info(temp)
temp_accumulate=[np.sum([k[1] for k in temp[:(i+1)]]) for i in range(len(temp))]
modified_penalty=[selection_constant*i for i in log_list[len(tginst.sgrnaid)]]
temp_accumulate_log_penalty=[sum(x) for x in zip(temp_accumulate,modified_penalty)]
#logging.info(temp_accumulate_log_penalty)
#if max(temp_accumulate_log_penalty)<0:
max_index=[i for i, j in enumerate(temp_accumulate_log_penalty) if j == max(temp_accumulate_log_penalty)]
#else:
# max_index=[i for i, j in enumerate(temp_accumulate_log_penalty) if j>*max(temp_accumulate_log_penalty)]
on_target_sgRNA=[temp[i][0] for i in range(max_index[-1]+1)]
tginst.eff_estimate=[1]*len(tginst.sgrnaid)
outliers_index=[orders for orders,sgRNA in enumerate(tginst.sgrnaid) if sgRNA not in on_target_sgRNA]
for i in outliers_index:
tginst.eff_estimate[i]=0
def background(self):
sgRNA_beta=dict()
sgRNA_probability=dict()
os.chdir(self.output_directory)
#os.chdir("/Users/chen-haochen/Documents/Data_storage/CRISPR/MAGECK/Tim/singlized_Tim.txt_mageck_bayes")
data=pickle.load(open("self_pre_dispersion_fitting_round.p",'rb'))
for (tgid,tginst) in data.allgenedict.items():
sgRNA_beta[tgid]=tginst.beta_estimate[1]
return(sgRNA_beta)
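# Toy illustration (not part of the pipeline) of the cutoff rule used in
# bayes_selection above: sort the per-sgRNA mean log-likelihoods, take the
# running sum, add the selection_constant*log(rank+1) penalty, and keep the
# sgRNAs up to the argmax of the penalized sum. numpy is imported above as np.
def _cutoff_demo(likelihoods, selection_constant=2):
    vals = sorted(likelihoods, reverse=True)
    running = np.cumsum(vals)
    penalized = [running[i] + selection_constant * np.log(i + 1)
                 for i in range(len(vals))]
    return int(np.argmax(penalized)) + 1  # number of sgRNAs kept as on-target
# e.g. _cutoff_demo([-0.1, -0.2, -0.3, -5.0, -6.0]) returns 3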
|
{
"content_hash": "065de51da21e286088339a9fe0803e70",
"timestamp": "",
"source": "github",
"line_count": 116,
"max_line_length": 201,
"avg_line_length": 51.275862068965516,
"alnum_prop": 0.6792199058507061,
"repo_name": "knightjdr/screenhits",
"id": "34865291a6702bca118be0a426d040d08e69f96a",
"size": "5948",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "api/app/scripts/CRISPR/MAGeCK/v0.01/lib/python2.7/site-packages/mageck/bayes_selection.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1425"
},
{
"name": "JavaScript",
"bytes": "1377219"
},
{
"name": "Perl",
"bytes": "5829"
},
{
"name": "Perl 6",
"bytes": "809"
},
{
"name": "Python",
"bytes": "335550"
}
],
"symlink_target": ""
}
|
r"""
This code was generated by
\ / _ _ _| _ _
| (_)\/(_)(_|\/| |(/_ v1.0.0
/ /
"""
from twilio.base import deserialize
from twilio.base import values
from twilio.base.instance_context import InstanceContext
from twilio.base.instance_resource import InstanceResource
from twilio.base.list_resource import ListResource
from twilio.base.page import Page
class PhoneNumberList(ListResource):
def __init__(self, version):
"""
Initialize the PhoneNumberList
:param Version version: Version that contains the resource
:returns: twilio.rest.routes.v2.phone_number.PhoneNumberList
:rtype: twilio.rest.routes.v2.phone_number.PhoneNumberList
"""
super(PhoneNumberList, self).__init__(version)
# Path Solution
self._solution = {}
def get(self, phone_number):
"""
Constructs a PhoneNumberContext
:param phone_number: The phone number
:returns: twilio.rest.routes.v2.phone_number.PhoneNumberContext
:rtype: twilio.rest.routes.v2.phone_number.PhoneNumberContext
"""
return PhoneNumberContext(self._version, phone_number=phone_number, )
def __call__(self, phone_number):
"""
Constructs a PhoneNumberContext
:param phone_number: The phone number
:returns: twilio.rest.routes.v2.phone_number.PhoneNumberContext
:rtype: twilio.rest.routes.v2.phone_number.PhoneNumberContext
"""
return PhoneNumberContext(self._version, phone_number=phone_number, )
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
return '<Twilio.Routes.V2.PhoneNumberList>'
class PhoneNumberPage(Page):
def __init__(self, version, response, solution):
"""
Initialize the PhoneNumberPage
:param Version version: Version that contains the resource
:param Response response: Response from the API
:returns: twilio.rest.routes.v2.phone_number.PhoneNumberPage
:rtype: twilio.rest.routes.v2.phone_number.PhoneNumberPage
"""
super(PhoneNumberPage, self).__init__(version, response)
# Path Solution
self._solution = solution
def get_instance(self, payload):
"""
Build an instance of PhoneNumberInstance
:param dict payload: Payload response from the API
:returns: twilio.rest.routes.v2.phone_number.PhoneNumberInstance
:rtype: twilio.rest.routes.v2.phone_number.PhoneNumberInstance
"""
return PhoneNumberInstance(self._version, payload, )
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
return '<Twilio.Routes.V2.PhoneNumberPage>'
class PhoneNumberContext(InstanceContext):
def __init__(self, version, phone_number):
"""
Initialize the PhoneNumberContext
:param Version version: Version that contains the resource
:param phone_number: The phone number
:returns: twilio.rest.routes.v2.phone_number.PhoneNumberContext
:rtype: twilio.rest.routes.v2.phone_number.PhoneNumberContext
"""
super(PhoneNumberContext, self).__init__(version)
# Path Solution
self._solution = {'phone_number': phone_number, }
self._uri = '/PhoneNumbers/{phone_number}'.format(**self._solution)
def update(self, voice_region=values.unset, friendly_name=values.unset):
"""
Update the PhoneNumberInstance
:param unicode voice_region: The Inbound Processing Region used for this phone number for voice
:param unicode friendly_name: A human readable description of this resource.
:returns: The updated PhoneNumberInstance
:rtype: twilio.rest.routes.v2.phone_number.PhoneNumberInstance
"""
data = values.of({'VoiceRegion': voice_region, 'FriendlyName': friendly_name, })
payload = self._version.update(method='POST', uri=self._uri, data=data, )
return PhoneNumberInstance(self._version, payload, phone_number=self._solution['phone_number'], )
def fetch(self):
"""
Fetch the PhoneNumberInstance
:returns: The fetched PhoneNumberInstance
:rtype: twilio.rest.routes.v2.phone_number.PhoneNumberInstance
"""
payload = self._version.fetch(method='GET', uri=self._uri, )
return PhoneNumberInstance(self._version, payload, phone_number=self._solution['phone_number'], )
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
context = ' '.join('{}={}'.format(k, v) for k, v in self._solution.items())
return '<Twilio.Routes.V2.PhoneNumberContext {}>'.format(context)
class PhoneNumberInstance(InstanceResource):
def __init__(self, version, payload, phone_number=None):
"""
Initialize the PhoneNumberInstance
:returns: twilio.rest.routes.v2.phone_number.PhoneNumberInstance
:rtype: twilio.rest.routes.v2.phone_number.PhoneNumberInstance
"""
super(PhoneNumberInstance, self).__init__(version)
# Marshaled Properties
self._properties = {
'phone_number': payload.get('phone_number'),
'url': payload.get('url'),
'sid': payload.get('sid'),
'account_sid': payload.get('account_sid'),
'friendly_name': payload.get('friendly_name'),
'voice_region': payload.get('voice_region'),
'date_created': deserialize.iso8601_datetime(payload.get('date_created')),
'date_updated': deserialize.iso8601_datetime(payload.get('date_updated')),
}
# Context
self._context = None
self._solution = {'phone_number': phone_number or self._properties['phone_number'], }
@property
def _proxy(self):
"""
Generate an instance context for the instance, the context is capable of
performing various actions. All instance actions are proxied to the context
:returns: PhoneNumberContext for this PhoneNumberInstance
:rtype: twilio.rest.routes.v2.phone_number.PhoneNumberContext
"""
if self._context is None:
self._context = PhoneNumberContext(self._version, phone_number=self._solution['phone_number'], )
return self._context
@property
def phone_number(self):
"""
:returns: The phone number
:rtype: unicode
"""
return self._properties['phone_number']
@property
def url(self):
"""
:returns: The absolute URL of the resource
:rtype: unicode
"""
return self._properties['url']
@property
def sid(self):
"""
:returns: A string that uniquely identifies the Inbound Processing Region assignments for this phone number.
:rtype: unicode
"""
return self._properties['sid']
@property
def account_sid(self):
"""
:returns: Account Sid.
:rtype: unicode
"""
return self._properties['account_sid']
@property
def friendly_name(self):
"""
:returns: A human readable description of the Inbound Processing Region assignments for this phone number.
:rtype: unicode
"""
return self._properties['friendly_name']
@property
def voice_region(self):
"""
:returns: The Inbound Processing Region used for this phone number for voice.
:rtype: unicode
"""
return self._properties['voice_region']
@property
def date_created(self):
"""
:returns: The date that this phone number was assigned an Inbound Processing Region.
:rtype: datetime
"""
return self._properties['date_created']
@property
def date_updated(self):
"""
:returns: The date that the Inbound Processing Region was updated for this phone number.
:rtype: datetime
"""
return self._properties['date_updated']
def update(self, voice_region=values.unset, friendly_name=values.unset):
"""
Update the PhoneNumberInstance
:param unicode voice_region: The Inbound Processing Region used for this phone number for voice
:param unicode friendly_name: A human readable description of this resource.
:returns: The updated PhoneNumberInstance
:rtype: twilio.rest.routes.v2.phone_number.PhoneNumberInstance
"""
return self._proxy.update(voice_region=voice_region, friendly_name=friendly_name, )
def fetch(self):
"""
Fetch the PhoneNumberInstance
:returns: The fetched PhoneNumberInstance
:rtype: twilio.rest.routes.v2.phone_number.PhoneNumberInstance
"""
return self._proxy.fetch()
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
context = ' '.join('{}={}'.format(k, v) for k, v in self._solution.items())
return '<Twilio.Routes.V2.PhoneNumberInstance {}>'.format(context)
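# Usage sketch: assumes a configured twilio Client and a phone number that
# already exists on the account; the number and the 'au1' region value are
# illustrative, not taken from this file.
#
#     from twilio.rest import Client
#     client = Client(account_sid, auth_token)
#     pn = client.routes.v2.phone_numbers('+15558675309') \
#         .update(voice_region='au1', friendly_name='AU voice routing')
#     print(pn.voice_region)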
|
{
"content_hash": "3199b354be6e919665d56a61383b6550",
"timestamp": "",
"source": "github",
"line_count": 291,
"max_line_length": 116,
"avg_line_length": 32.24742268041237,
"alnum_prop": 0.6304347826086957,
"repo_name": "twilio/twilio-python",
"id": "222959bcfa75340cb672479f3ee9bd4e82dcb293",
"size": "9399",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "twilio/rest/routes/v2/phone_number.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "234"
},
{
"name": "Makefile",
"bytes": "2157"
},
{
"name": "Python",
"bytes": "11241545"
}
],
"symlink_target": ""
}
|
"""
**Name**
pyzombied — Start pyzombie server.
**Synopsis**
``pyzombied.py [options]``
**Description**
The ``pyzombied.py`` command shall start the *pyzombie* HTTP RESTful
server.
**Options**
``--home``
Home directory to override $PYZOMBIEHOME.
``--config``
Configuration file. Default: ``$PYZOMBIEHOME/etc/pyzombie.conf``.
``--daemon``
    Start pyzombie as a daemon under the current user.
``--port``
Start pyzombie listening on this port. Default: 8008.
``--verbose``
Change default logging verbosity: ``critical``, ``error``,
``warning``, ``info``, ``debug``.
**Environment**
PYZOMBIEHOME
- pyzombie's home directory.
- Default: current working directory.
    - Daemon Mode: current user's home directory, or ``/`` if the user is root.
**Directories and Files**
``$PYZOMBIEHOME/etc/pyzombie.conf``
Configuration file.
``$PYZOMBIEHOME/var/run/pyzombie.pid``
File that contains the current pyzombie process id.
``$PYZOMBIEHOME/var/log/pyzombie``
Directory that contains pyzombie log files.
``$PYZOMBIEHOME/var/spool/pyzombie``
Directory that contains executions waiting to run.
``$PYZOMBIEHOME/tmp/pyzombie``
Directory to contain temporary files.
**Configuration**
[pyzombie]
``address``
The server address or DNS name: default localhost.
``port``
The TCP/IP port to listen: default 8008.
[pyzombie_filesystem]
``var``
The variable data root directory: default /var
[loggers]
``root``
Required
``zombie``
Required
[handlers]
[formatters]
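**Example**
    A minimal ``pyzombie.conf`` sketch (section names follow the description
    above; the handler and formatter values are illustrative assumptions,
    not project defaults)::
        [pyzombie]
        address = localhost
        port = 8008
        [pyzombie_filesystem]
        var = /var
        [loggers]
        keys = root, zombie
        [logger_root]
        level = WARNING
        handlers = console
        [logger_zombie]
        level = INFO
        handlers = console
        qualname = zombie
        [handlers]
        keys = console
        [handler_console]
        class = StreamHandler
        args = (sys.stderr,)
        formatter = plain
        [formatters]
        keys = plain
        [formatter_plain]
        format = %(asctime)s %(levelname)s %(message)s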
"""
__author__ = ('Lance Finn Helsten',)
__version__ = '1.0.1'
__copyright__ = """Copyright 2009 Lance Finn Helsten (helsten@acm.org)"""
__license__ = """
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
__docformat__ = "reStructuredText en"
import sys
if sys.version_info < (3, 0):
    raise Exception("{0} requires Python 3.0 or higher.".format(sys.argv[0]))
import os
import errno
import io
import configparser
import logging
import logging.config
from optparse import OptionParser
import pyzombie
###
### Functions
###
def resolvepath(path):
"""Fully resolve the given path into an absolute path taking into account,
the user, variables, etc.
"""
path = os.path.expanduser(path)
path = os.path.expandvars(path)
path = os.path.abspath(path)
return path
### Parse the arguments
parser = OptionParser(
description='pyzombie service',
version='%%prog %s' % (__version__,),
usage='usage: %prog [options]')
parser.add_option('', '--home',
action='store', type='string', dest='home', default=None,
help='Home directory to override $PYZOMBIEHOME.')
parser.add_option('', '--config',
action='store', type='string', dest='config', default=None,
help='Configuration file. Default: $PYZOMBIEHOME/etc/pyzombie.conf')
parser.add_option('', '--daemon',
    action='store_true', dest='daemon', default=False,
    help='Start pyzombie as a daemon under the current user.')
parser.add_option('', '--port',
action='store', type='string', dest='port', default=None,
help='TCP port pyzombie will listen (default: 8008).')
parser.add_option('', '--verbose',
action='store', type='string', dest='verbose', default=None,
help='Change default logging verbosity: critical, error, warning, info, debug.')
options, args = parser.parse_args()
###
### Environment
###
if 'PYZOMBIEHOME' not in os.environ:
    if options.daemon:
        if os.environ['USER'] == 'root':
            os.environ['PYZOMBIEHOME'] = '/'
        else:
            os.environ['PYZOMBIEHOME'] = os.environ['HOME']
    else:
        os.environ['PYZOMBIEHOME'] = os.curdir
if not os.path.isdir(resolvepath(os.environ['PYZOMBIEHOME'])):
print("""$PYZOMBIEHOME="{0[PYZOMBIEHOME]}" does not exist or is not a directory.""".format(os.environ), file=sys.stderr)
sys.exit(1)
if not options.config:
options.config = os.path.join(resolvepath(os.environ['PYZOMBIEHOME']), 'etc', 'pyzombie.conf')
print("Configuration:", options.config)
pyzombie.ZombieConfig.config.read(options.config)
if options.port:
pyzombie.ZombieConfig.config.set('pyzombie', 'port', options.port)
if options.verbose:
pyzombie.ZombieConfig.config.set('logger_zombie', 'level', options.verbose.upper())
###
### Setup logging configuration
###
try:
logconf = io.StringIO()
pyzombie.ZombieConfig.config.write(logconf)
logconf.seek(0)
logging.config.fileConfig(logconf)
except configparser.NoSectionError:
    # CONFIG_INIT is not defined in this script; assume it refers to the
    # default configuration string shipped in pyzombie's ZombieConfig module.
    logging.config.fileConfig(io.StringIO(pyzombie.ZombieConfig.CONFIG_INIT))
logging.getLogger("zombie").setLevel(logging.INFO)
logging.getLogger().info("Using default logging configuration.")
logging.getLogger().info("Logging initialized.")
### Start the zombie
try:
zombie = pyzombie.ZombieServer()
zombie.start()
except KeyboardInterrupt:
print("User cancel.")
|
{
"content_hash": "79a0a212b849efd84407d794171ada03",
"timestamp": "",
"source": "github",
"line_count": 192,
"max_line_length": 124,
"avg_line_length": 29.296875,
"alnum_prop": 0.6583111111111111,
"repo_name": "lanhel/pyzombie",
"id": "39e2ba68a1fb393aa182bcb3714984a1a540bc38",
"size": "5754",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyzombied.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "6328"
},
{
"name": "HTML",
"bytes": "2952"
},
{
"name": "Makefile",
"bytes": "4623"
},
{
"name": "Python",
"bytes": "166775"
}
],
"symlink_target": ""
}
|
import json
import datetime
import six
import dateutil.parser
class JSONWithDatetimeEncoder(json.JSONEncoder):
"""Create JSON string as json.JSONEncoder, convert datetime.datetime objects to ISO format string."""
def default(self, o):
o = o.isoformat() if isinstance(o, datetime.datetime) else json.JSONEncoder.default(self, o)
return o
class JSONWithDatetimeDecoder(json.JSONDecoder):
"""Parse JSON string as json.JSONDecoder, matched strings convert to datetime.datetime."""
def decode(self, s):
o = json.JSONDecoder.decode(self, s)
return self.handleObj(o)
def handleObj(self, o):
if isinstance(o, dict):
keys = list(o.keys())
for key in keys:
val = o[key]
o[key] = self.handleObj(val)
return o
if isinstance(o, list):
c = []
for val in o:
c.append(self.handleObj(val))
return c
if isinstance(o, six.string_types):
            try:
                return dateutil.parser.parse(o)
            except (ValueError, OverflowError):
                # Not a date-like string; leave it unchanged.
                pass
return o
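# Usage sketch: round-trips a datetime through JSON using the two classes
# above (the assert holds because the ISO string parses back via dateutil):
#
#     import datetime, json
#     payload = {'at': datetime.datetime(2020, 1, 2, 3, 4, 5)}
#     s = json.dumps(payload, cls=JSONWithDatetimeEncoder)
#     back = json.loads(s, cls=JSONWithDatetimeDecoder)
#     assert back['at'] == payload['at']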
|
{
"content_hash": "8d02867addc6475e1b2281ffa92e4dab",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 105,
"avg_line_length": 30.05263157894737,
"alnum_prop": 0.5840630472854641,
"repo_name": "postmen/postmen-sdk-python",
"id": "4310a6a30c6c37ad49811c857f026998be153318",
"size": "1142",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "postmen/jsont.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "35751"
}
],
"symlink_target": ""
}
|
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "delphi.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
|
{
"content_hash": "ed810af8bc0f99209e580ac5efbb143a",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 70,
"avg_line_length": 25.22222222222222,
"alnum_prop": 0.7092511013215859,
"repo_name": "VulcanoAhab/delphi",
"id": "204aafe8fab9182bd79f967ba5df8e946e322525",
"size": "249",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "manage.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "2786"
},
{
"name": "HTML",
"bytes": "131"
},
{
"name": "Python",
"bytes": "124776"
},
{
"name": "Shell",
"bytes": "3298"
}
],
"symlink_target": ""
}
|
import os
import warnings
from dataset.persistence.database import Database
from dataset.persistence.table import Table
from dataset.persistence.util import row_type
from dataset.freeze.app import freeze
# shut up useless SA warning:
warnings.filterwarnings(
'ignore', 'Unicode type received non-unicode bind param value.')
warnings.filterwarnings(
'ignore', 'Skipping unsupported ALTER for creation of implicit constraint')
__all__ = ['Database', 'Table', 'freeze', 'connect']
def connect(url=None, schema=None, reflect_metadata=True, engine_kwargs=None,
reflect_views=True, row_type=row_type):
""" Opens a new connection to a database.
*url* can be any valid `SQLAlchemy engine URL`_. If *url* is not defined
it will try to use *DATABASE_URL* from environment variable. Returns an
instance of :py:class:`Database <dataset.Database>`. Set *reflect_metadata*
to False if you don't want the entire database schema to be pre-loaded.
This significantly speeds up connecting to large databases with lots of
tables. *reflect_views* can be set to False if you don't want views to be
    loaded. Additionally, *engine_kwargs* will be passed directly to
    SQLAlchemy; e.g. setting *engine_kwargs={'pool_recycle': 3600}* avoids `DB
connection timeout`_. Set *row_type* to an alternate dict-like class to
change the type of container rows are stored in.::
db = dataset.connect('sqlite:///factbook.db')
.. _SQLAlchemy Engine URL: http://docs.sqlalchemy.org/en/latest/core/engines.html#sqlalchemy.create_engine
.. _DB connection timeout: http://docs.sqlalchemy.org/en/latest/core/pooling.html#setting-pool-recycle
"""
if url is None:
url = os.environ.get('DATABASE_URL', 'sqlite://')
return Database(url, schema=schema, reflect_metadata=reflect_metadata,
engine_kwargs=engine_kwargs, reflect_views=reflect_views,
row_type=row_type)
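# Usage sketch: connects to a local SQLite file as in the docstring above;
# the table name 'population' is illustrative.
#
#     import dataset
#     db = dataset.connect('sqlite:///factbook.db',
#                          engine_kwargs={'pool_recycle': 3600})
#     table = db['population']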
|
{
"content_hash": "01eeb26a25de5e529896731139d750f6",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 110,
"avg_line_length": 46.80952380952381,
"alnum_prop": 0.7131230925737538,
"repo_name": "twds/dataset",
"id": "0a1556c18271d16e716fac4298ca32d2d5527b45",
"size": "1966",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "dataset/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "67534"
}
],
"symlink_target": ""
}
|
"""A server wrapper around dealer operations."""
from SimpleXMLRPCServer import SimpleXMLRPCServer, SimpleXMLRPCRequestHandler
import logging
from .bank import Bank
QUERIES = {
"is_done": (lambda bank: bank.is_done(), True),
"get_output_path": (lambda bank: bank.output_path, None),
"get_header": (lambda bank: bank.header, ""),
"get_feature": (lambda bank: bank.feature, ""),
}
class BankServer(SimpleXMLRPCServer, object):
"""RPC command server."""
allow_reuse_address = True
def __init__(self, host, port, banks):
super(BankServer, self).__init__(
(host, port),
SimpleXMLRPCRequestHandler,
logRequests = False)
self.__banks = [Bank(path) for path in banks]
self.__assigned = {}
self.__log = logging.getLogger(__name__)
self.register_function(self.is_fresh, "is_fresh")
self.register_function(self.get_next_scenario, "get_next_scenario")
for (name, (callback, default)) in QUERIES.iteritems():
self.register_function(self.__query_bank(callback, default), name)
def serve_forever(self, poll_interval = 0.5):
"""Start serving."""
(address, port) = self.server_address
self.__log.info("Server started on %s:%d", address, port)
super(BankServer, self).serve_forever(poll_interval)
def shutdown(self):
"""Stop serving."""
self.__log.info("Stopped serving")
super(BankServer, self).shutdown()
def is_fresh(self, client):
"""Returns whether the current bank is fresh.
        This function always returns True as long as the client has not been assigned a bank.
"""
if client not in self.__assigned:
return True
else:
query = self.__query_bank(lambda bank: bank.is_fresh(), False)
return query(client)
def get_next_scenario(self, client):
"""Returns the next scenario to deal to the client.
        This function also assigns the bank to the client.
"""
bank = self.__get_current_bank(client)
if not bank:
self.__log.debug("No more scenarios for '%s'", client)
return None
if client not in self.__assigned:
self.__log.info("Assigning '%s' to '%s'", bank.feature.splitlines()[0], client)
self.__assigned[client] = bank
scenario = bank.get_next_scenario()
self.__log.info("Sent '%s' to '%s'", scenario.lstrip(), client)
return scenario
def __query_bank(self, get_value, default):
"""Returns a callback to query the current bank's property."""
def query(client):
# pylint: disable=missing-docstring
bank = self.__get_current_bank(client)
if not bank:
return default
return get_value(bank)
return query
def __get_current_bank(self, client):
"""Returns the first bank that isn't done yet, None otherwise."""
# If client was already assigned a bank, check it.
if client in self.__assigned:
bank = self.__assigned[client]
# If bank isn't done, deal from it.
if not bank.is_done():
return bank
# Bank is done. Unassign it and look for the next one.
self.__log.info("Unassigning '%s' from '%s'", bank.feature.splitlines()[0], client)
self.__assigned.pop(client)
for bank in self.__banks:
# Skip finished banks.
if bank.is_done():
continue
# Skip banks which were already assigned.
if bank in self.__assigned.itervalues():
continue
# Found a bank.
return bank
# No bank was found.
return None
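# Usage sketch: the host, port, and feature-bank path are illustrative;
# Bank itself is imported above from .bank.
#
#     import logging
#     logging.basicConfig(level=logging.INFO)
#     server = BankServer("localhost", 8000, ["banks/checkout.feature"])
#     try:
#         server.serve_forever()
#     except KeyboardInterrupt:
#         server.shutdown()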
|
{
"content_hash": "99cd210d2377950215bf2b00d06c2ea1",
"timestamp": "",
"source": "github",
"line_count": 113,
"max_line_length": 95,
"avg_line_length": 33.80530973451327,
"alnum_prop": 0.581413612565445,
"repo_name": "nivbend/bdd_bot",
"id": "f1a262faee176e0c5fe7b5823a1d90715f0c96bc",
"size": "3820",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bddbot/server.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Cucumber",
"bytes": "34134"
},
{
"name": "Python",
"bytes": "100120"
}
],
"symlink_target": ""
}
|
import sys
import time
import serial
import serial.tools.list_ports
from SerialCrypt import Devices
# variables
SERIAL_BD = 9600
SERIAL_PORT = None
SERIAL_TIMEOUT = 5
# verify the arduino is connected
for port in serial.tools.list_ports.comports():
if port[2][:len(Devices.DEVICE_DEBUG_ID)] == Devices.DEVICE_DEBUG_ID:
SERIAL_PORT = port[0]
break
try:
COM_SERIAL = serial.Serial(SERIAL_PORT,SERIAL_BD,timeout=SERIAL_TIMEOUT)
COM_SERIAL.setDTR(False)
COM_SERIAL.setRTS(True)
COM_SERIAL.flushInput()
COM_SERIAL.flushOutput()
COM_SERIAL.setRTS(False)
COM_SERIAL.setDTR(True)
except Exception,e:
print e
try:
while True:
byte = COM_SERIAL.read()
if len(byte) > 0:
sys.stdout.write(byte)
except:
pass
|
{
"content_hash": "6bea62f07a63c697a06b0bd377744ac6",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 73,
"avg_line_length": 20.083333333333332,
"alnum_prop": 0.7413554633471646,
"repo_name": "sch3m4/SerialCrypt",
"id": "36463794fbfb863fa367987e62a8795771aa5e0e",
"size": "857",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "apps/debug.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Arduino",
"bytes": "6418"
},
{
"name": "C",
"bytes": "44595"
},
{
"name": "C++",
"bytes": "41909"
},
{
"name": "Python",
"bytes": "13910"
}
],
"symlink_target": ""
}
|
import warnings
from pandas import DateOffset, DatetimeIndex, Series, Timestamp
from pandas.compat import add_metaclass
from datetime import datetime, timedelta
from dateutil.relativedelta import MO, TU, WE, TH, FR, SA, SU # noqa
from pandas.tseries.offsets import Easter, Day
import numpy as np
def next_monday(dt):
"""
If holiday falls on Saturday, use following Monday instead;
if holiday falls on Sunday, use Monday instead
"""
if dt.weekday() == 5:
return dt + timedelta(2)
elif dt.weekday() == 6:
return dt + timedelta(1)
return dt
def next_monday_or_tuesday(dt):
"""
    For the second holiday of two adjacent ones:
If holiday falls on Saturday, use following Monday instead;
if holiday falls on Sunday or Monday, use following Tuesday instead
(because Monday is already taken by adjacent holiday on the day before)
"""
dow = dt.weekday()
if dow == 5 or dow == 6:
return dt + timedelta(2)
elif dow == 0:
return dt + timedelta(1)
return dt
def previous_friday(dt):
"""
If holiday falls on Saturday or Sunday, use previous Friday instead.
"""
if dt.weekday() == 5:
return dt - timedelta(1)
elif dt.weekday() == 6:
return dt - timedelta(2)
return dt
def sunday_to_monday(dt):
"""
If holiday falls on Sunday, use day thereafter (Monday) instead.
"""
if dt.weekday() == 6:
return dt + timedelta(1)
return dt
def weekend_to_monday(dt):
"""
If holiday falls on Sunday or Saturday,
use day thereafter (Monday) instead.
Needed for holidays such as Christmas observation in Europe
"""
if dt.weekday() == 6:
return dt + timedelta(1)
elif dt.weekday() == 5:
return dt + timedelta(2)
return dt
def nearest_workday(dt):
"""
If holiday falls on Saturday, use day before (Friday) instead;
if holiday falls on Sunday, use day thereafter (Monday) instead.
"""
if dt.weekday() == 5:
return dt - timedelta(1)
elif dt.weekday() == 6:
return dt + timedelta(1)
return dt
def next_workday(dt):
"""
returns next weekday used for observances
"""
dt += timedelta(days=1)
while dt.weekday() > 4:
# Mon-Fri are 0-4
dt += timedelta(days=1)
return dt
def previous_workday(dt):
"""
returns previous weekday used for observances
"""
dt -= timedelta(days=1)
while dt.weekday() > 4:
# Mon-Fri are 0-4
dt -= timedelta(days=1)
return dt
def before_nearest_workday(dt):
"""
returns previous workday after nearest workday
"""
return previous_workday(nearest_workday(dt))
def after_nearest_workday(dt):
"""
returns next workday after nearest workday
needed for Boxing day or multiple holidays in a series
"""
return next_workday(nearest_workday(dt))
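# Example: how the observance helpers above shift a weekend holiday
# (2015-07-04 fell on a Saturday):
#
#     from datetime import datetime
#     nearest_workday(datetime(2015, 7, 4))   # 2015-07-03, the Friday before
#     next_monday(datetime(2015, 7, 4))       # 2015-07-06, the Monday after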
class Holiday(object):
"""
Class that defines a holiday with start/end dates and rules
for observance.
"""
def __init__(self, name, year=None, month=None, day=None, offset=None,
observance=None, start_date=None, end_date=None,
days_of_week=None):
"""
Parameters
----------
name : str
            Name of the holiday, defaults to class name
offset : array of pandas.tseries.offsets or
class from pandas.tseries.offsets
computes offset from date
observance: function
computes when holiday is given a pandas Timestamp
        days_of_week:
            Provide a tuple of days, e.g. (0, 1, 2, 3) for Monday through
            Thursday (Monday=0, ..., Sunday=6)
Examples
--------
>>> from pandas.tseries.holiday import Holiday, nearest_workday
>>> from pandas import DateOffset
>>> from dateutil.relativedelta import MO
>>> USMemorialDay = Holiday('MemorialDay', month=5, day=24,
offset=DateOffset(weekday=MO(1)))
>>> USLaborDay = Holiday('Labor Day', month=9, day=1,
offset=DateOffset(weekday=MO(1)))
>>> July3rd = Holiday('July 3rd', month=7, day=3,)
>>> NewYears = Holiday('New Years Day', month=1, day=1,
                               observance=nearest_workday)
>>> July3rd = Holiday('July 3rd', month=7, day=3,
days_of_week=(0, 1, 2, 3))
"""
if offset is not None and observance is not None:
raise NotImplementedError("Cannot use both offset and observance.")
self.name = name
self.year = year
self.month = month
self.day = day
self.offset = offset
self.start_date = Timestamp(
start_date) if start_date is not None else start_date
self.end_date = Timestamp(
end_date) if end_date is not None else end_date
self.observance = observance
assert (days_of_week is None or type(days_of_week) == tuple)
self.days_of_week = days_of_week
def __repr__(self):
info = ''
if self.year is not None:
info += 'year=%s, ' % self.year
info += 'month=%s, day=%s, ' % (self.month, self.day)
if self.offset is not None:
info += 'offset=%s' % self.offset
if self.observance is not None:
info += 'observance=%s' % self.observance
repr = 'Holiday: %s (%s)' % (self.name, info)
return repr
def dates(self, start_date, end_date, return_name=False):
"""
Calculate holidays observed between start date and end date
Parameters
----------
start_date : starting date, datetime-like, optional
end_date : ending date, datetime-like, optional
return_name : bool, optional, default=False
If True, return a series that has dates and holiday names.
False will only return dates.
"""
start_date = Timestamp(start_date)
end_date = Timestamp(end_date)
filter_start_date = start_date
filter_end_date = end_date
if self.year is not None:
dt = Timestamp(datetime(self.year, self.month, self.day))
if return_name:
return Series(self.name, index=[dt])
else:
return [dt]
dates = self._reference_dates(start_date, end_date)
holiday_dates = self._apply_rule(dates)
if self.days_of_week is not None:
holiday_dates = holiday_dates[np.in1d(holiday_dates.dayofweek,
self.days_of_week)]
if self.start_date is not None:
filter_start_date = max(self.start_date.tz_localize(
filter_start_date.tz), filter_start_date)
if self.end_date is not None:
filter_end_date = min(self.end_date.tz_localize(
filter_end_date.tz), filter_end_date)
holiday_dates = holiday_dates[(holiday_dates >= filter_start_date) &
(holiday_dates <= filter_end_date)]
if return_name:
return Series(self.name, index=holiday_dates)
return holiday_dates
def _reference_dates(self, start_date, end_date):
"""
Get reference dates for the holiday.
Return reference dates for the holiday also returning the year
prior to the start_date and year following the end_date. This ensures
that any offsets to be applied will yield the holidays within
the passed in dates.
"""
if self.start_date is not None:
start_date = self.start_date.tz_localize(start_date.tz)
if self.end_date is not None:
end_date = self.end_date.tz_localize(start_date.tz)
year_offset = DateOffset(years=1)
reference_start_date = Timestamp(
datetime(start_date.year - 1, self.month, self.day))
reference_end_date = Timestamp(
datetime(end_date.year + 1, self.month, self.day))
# Don't process unnecessary holidays
dates = DatetimeIndex(start=reference_start_date,
end=reference_end_date,
freq=year_offset, tz=start_date.tz)
return dates
def _apply_rule(self, dates):
"""
Apply the given offset/observance to a DatetimeIndex of dates.
Parameters
----------
dates : DatetimeIndex
Dates to apply the given offset/observance rule
Returns
-------
Dates with rules applied
"""
if self.observance is not None:
return dates.map(lambda d: self.observance(d))
if self.offset is not None:
if not isinstance(self.offset, list):
offsets = [self.offset]
else:
offsets = self.offset
for offset in offsets:
# if we are adding a non-vectorized value
# ignore the PerformanceWarnings:
with warnings.catch_warnings(record=True):
dates += offset
return dates
holiday_calendars = {}
def register(cls):
try:
name = cls.name
    except AttributeError:
name = cls.__name__
holiday_calendars[name] = cls
def get_calendar(name):
"""
Return an instance of a calendar based on its name.
Parameters
----------
name : str
Calendar name to return an instance of
"""
return holiday_calendars[name]()
class HolidayCalendarMetaClass(type):
def __new__(cls, clsname, bases, attrs):
calendar_class = super(HolidayCalendarMetaClass, cls).__new__(
cls, clsname, bases, attrs)
register(calendar_class)
return calendar_class
@add_metaclass(HolidayCalendarMetaClass)
class AbstractHolidayCalendar(object):
"""
Abstract interface to create holidays following certain rules.
"""
__metaclass__ = HolidayCalendarMetaClass
rules = []
start_date = Timestamp(datetime(1970, 1, 1))
end_date = Timestamp(datetime(2030, 12, 31))
_cache = None
def __init__(self, name=None, rules=None):
"""
        Initializes holiday object with a given set of rules. Normally
classes just have the rules defined within them.
Parameters
----------
name : str
Name of the holiday calendar, defaults to class name
rules : array of Holiday objects
A set of rules used to create the holidays.
"""
super(AbstractHolidayCalendar, self).__init__()
if name is None:
name = self.__class__.__name__
self.name = name
if rules is not None:
self.rules = rules
def rule_from_name(self, name):
for rule in self.rules:
if rule.name == name:
return rule
return None
def holidays(self, start=None, end=None, return_name=False):
"""
        Returns holidays observed between start_date and end_date
Parameters
----------
start : starting date, datetime-like, optional
end : ending date, datetime-like, optional
return_name : bool, optional
If True, return a series that has dates and holiday names.
False will only return a DatetimeIndex of dates.
Returns
-------
DatetimeIndex of holidays
"""
if self.rules is None:
raise Exception('Holiday Calendar %s does not have any '
'rules specified' % self.name)
if start is None:
start = AbstractHolidayCalendar.start_date
if end is None:
end = AbstractHolidayCalendar.end_date
start = Timestamp(start)
end = Timestamp(end)
holidays = None
# If we don't have a cache or the dates are outside the prior cache, we
# get them again
if (self._cache is None or start < self._cache[0] or
end > self._cache[1]):
for rule in self.rules:
rule_holidays = rule.dates(start, end, return_name=True)
if holidays is None:
holidays = rule_holidays
else:
holidays = holidays.append(rule_holidays)
self._cache = (start, end, holidays.sort_index())
holidays = self._cache[2]
holidays = holidays[start:end]
if return_name:
return holidays
else:
return holidays.index
@staticmethod
def merge_class(base, other):
"""
Merge holiday calendars together. The base calendar
will take precedence to other. The merge will be done
based on each holiday's name.
Parameters
----------
base : AbstractHolidayCalendar
instance/subclass or array of Holiday objects
other : AbstractHolidayCalendar
instance/subclass or array of Holiday objects
"""
try:
other = other.rules
        except AttributeError:
pass
if not isinstance(other, list):
other = [other]
other_holidays = dict((holiday.name, holiday) for holiday in other)
try:
base = base.rules
        except AttributeError:
pass
if not isinstance(base, list):
base = [base]
base_holidays = dict([(holiday.name, holiday) for holiday in base])
other_holidays.update(base_holidays)
return list(other_holidays.values())
def merge(self, other, inplace=False):
"""
Merge holiday calendars together. The caller's class
rules take precedence. The merge will be done
based on each holiday's name.
Parameters
----------
other : holiday calendar
inplace : bool (default=False)
If True set rule_table to holidays, else return array of Holidays
"""
holidays = self.merge_class(self, other)
if inplace:
self.rules = holidays
else:
return holidays
USMemorialDay = Holiday('MemorialDay', month=5, day=31,
offset=DateOffset(weekday=MO(-1)))
USLaborDay = Holiday('Labor Day', month=9, day=1,
offset=DateOffset(weekday=MO(1)))
USColumbusDay = Holiday('Columbus Day', month=10, day=1,
offset=DateOffset(weekday=MO(2)))
USThanksgivingDay = Holiday('Thanksgiving', month=11, day=1,
offset=DateOffset(weekday=TH(4)))
USMartinLutherKingJr = Holiday('Dr. Martin Luther King Jr.',
start_date=datetime(1986, 1, 1), month=1, day=1,
offset=DateOffset(weekday=MO(3)))
USPresidentsDay = Holiday('President''s Day', month=2, day=1,
offset=DateOffset(weekday=MO(3)))
GoodFriday = Holiday("Good Friday", month=1, day=1, offset=[Easter(), Day(-2)])
EasterMonday = Holiday("Easter Monday", month=1, day=1,
offset=[Easter(), Day(1)])
class USFederalHolidayCalendar(AbstractHolidayCalendar):
"""
US Federal Government Holiday Calendar based on rules specified by:
https://www.opm.gov/policy-data-oversight/
snow-dismissal-procedures/federal-holidays/
"""
rules = [
Holiday('New Years Day', month=1, day=1, observance=nearest_workday),
USMartinLutherKingJr,
USPresidentsDay,
USMemorialDay,
Holiday('July 4th', month=7, day=4, observance=nearest_workday),
USLaborDay,
USColumbusDay,
Holiday('Veterans Day', month=11, day=11, observance=nearest_workday),
USThanksgivingDay,
Holiday('Christmas', month=12, day=25, observance=nearest_workday)
]
def HolidayCalendarFactory(name, base, other,
base_class=AbstractHolidayCalendar):
rules = AbstractHolidayCalendar.merge_class(base, other)
calendar_class = type(name, (base_class,), {"rules": rules, "name": name})
return calendar_class
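# Usage sketch: exercises the calendar defined above; the dates shown are
# what its rules produce for this range.
#
#     cal = USFederalHolidayCalendar()
#     cal.holidays(start='2017-12-01', end='2018-01-31')
#     # DatetimeIndex(['2017-12-25', '2018-01-01', '2018-01-15'], ...)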
|
{
"content_hash": "6a2df3f87a0ea97d9e49cce6b64ccada",
"timestamp": "",
"source": "github",
"line_count": 509,
"max_line_length": 79,
"avg_line_length": 31.781925343811395,
"alnum_prop": 0.5806391790814118,
"repo_name": "louisLouL/pair_trading",
"id": "9acb52ebe0e9f25f0eaefbb3524240ef02170b43",
"size": "16177",
"binary": false,
"copies": "9",
"ref": "refs/heads/master",
"path": "capstone_env/lib/python3.6/site-packages/pandas/tseries/holiday.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "148513"
},
{
"name": "C++",
"bytes": "172384"
},
{
"name": "CSS",
"bytes": "5382"
},
{
"name": "Fortran",
"bytes": "8281"
},
{
"name": "HTML",
"bytes": "568460"
},
{
"name": "JavaScript",
"bytes": "25360"
},
{
"name": "Jupyter Notebook",
"bytes": "16254"
},
{
"name": "Python",
"bytes": "30357437"
},
{
"name": "Shell",
"bytes": "3260"
},
{
"name": "Smarty",
"bytes": "2045"
}
],
"symlink_target": ""
}
|
from socket import timeout as socket_timeout # noqa
from django.core.urlresolvers import reverse # noqa
from django import http
from mox import IgnoreArg # noqa
from mox import IsA # noqa
from openstack_dashboard import api
from openstack_dashboard.test import helpers as test
USERS_INDEX_URL = reverse('horizon:admin:users:index')
USER_CREATE_URL = reverse('horizon:admin:users:create')
USER_UPDATE_URL = reverse('horizon:admin:users:update', args=[1])
class UsersViewTests(test.BaseAdminViewTests):
def _get_domain_id(self):
return self.request.session.get('domain_context', None)
def _get_users(self, domain_id):
if not domain_id:
users = self.users.list()
else:
users = [user for user in self.users.list()
if user.domain_id == domain_id]
return users
@test.create_stubs({api.keystone: ('user_list',)})
def test_index(self):
domain_id = self._get_domain_id()
users = self._get_users(domain_id)
api.keystone.user_list(IgnoreArg(), domain=domain_id) \
.AndReturn(users)
self.mox.ReplayAll()
res = self.client.get(USERS_INDEX_URL)
self.assertTemplateUsed(res, 'admin/users/index.html')
self.assertItemsEqual(res.context['table'].data, users)
if domain_id:
for user in res.context['table'].data:
self.assertItemsEqual(user.domain_id, domain_id)
def test_index_with_domain(self):
domain = self.domains.get(id="1")
self.setSessionValues(domain_context=domain.id,
domain_context_name=domain.name)
self.test_index()
@test.create_stubs({api.keystone: ('user_create',
'tenant_list',
'add_tenant_user_role',
'get_default_role',
'role_list')})
def test_create(self):
user = self.users.get(id="1")
domain_id = self._get_domain_id()
role = self.roles.first()
api.keystone.tenant_list(IgnoreArg(), user=None) \
.AndReturn([self.tenants.list(), False])
api.keystone.user_create(IgnoreArg(),
name=user.name,
email=user.email,
password=user.password,
project=self.tenant.id,
enabled=True,
domain=domain_id).AndReturn(user)
api.keystone.role_list(IgnoreArg()).AndReturn(self.roles.list())
api.keystone.get_default_role(IgnoreArg()).AndReturn(role)
api.keystone.add_tenant_user_role(IgnoreArg(), self.tenant.id,
user.id, role.id)
self.mox.ReplayAll()
formData = {'method': 'CreateUserForm',
'name': user.name,
'email': user.email,
'password': user.password,
'project': self.tenant.id,
'role_id': self.roles.first().id,
'confirm_password': user.password}
res = self.client.post(USER_CREATE_URL, formData)
self.assertNoFormErrors(res)
self.assertMessageCount(success=1)
def test_create_with_domain(self):
domain = self.domains.get(id="1")
self.setSessionValues(domain_context=domain.id,
domain_context_name=domain.name)
self.test_create()
@test.create_stubs({api.keystone: ('tenant_list',
'role_list',
'get_default_role')})
def test_create_with_password_mismatch(self):
user = self.users.get(id="1")
api.keystone.tenant_list(IgnoreArg(), user=None) \
.AndReturn([self.tenants.list(), False])
api.keystone.role_list(IgnoreArg()).AndReturn(self.roles.list())
api.keystone.get_default_role(IgnoreArg()) \
.AndReturn(self.roles.first())
self.mox.ReplayAll()
formData = {'method': 'CreateUserForm',
'name': user.name,
'email': user.email,
'password': user.password,
'project': self.tenant.id,
'role_id': self.roles.first().id,
'confirm_password': "doesntmatch"}
res = self.client.post(USER_CREATE_URL, formData)
self.assertFormError(res, "form", None, ['Passwords do not match.'])
@test.create_stubs({api.keystone: ('tenant_list',
'role_list',
'get_default_role')})
def test_create_validation_for_password_too_short(self):
user = self.users.get(id="1")
api.keystone.tenant_list(IgnoreArg(), user=None) \
.AndReturn([self.tenants.list(), False])
api.keystone.role_list(IgnoreArg()).AndReturn(self.roles.list())
api.keystone.get_default_role(IgnoreArg()) \
.AndReturn(self.roles.first())
self.mox.ReplayAll()
# check password min-len verification
formData = {'method': 'CreateUserForm',
'name': user.name,
'email': user.email,
'password': 'four',
'project': self.tenant.id,
'role_id': self.roles.first().id,
'confirm_password': 'four'}
res = self.client.post(USER_CREATE_URL, formData)
self.assertFormError(
res, "form", 'password',
['Password must be between 8 and 18 characters.'])
@test.create_stubs({api.keystone: ('tenant_list',
'role_list',
'get_default_role')})
def test_create_validation_for_password_too_long(self):
user = self.users.get(id="1")
api.keystone.tenant_list(IgnoreArg(), user=None) \
.AndReturn([self.tenants.list(), False])
api.keystone.role_list(IgnoreArg()).AndReturn(self.roles.list())
api.keystone.get_default_role(IgnoreArg()) \
.AndReturn(self.roles.first())
self.mox.ReplayAll()
# check password min-len verification
formData = {'method': 'CreateUserForm',
'name': user.name,
'email': user.email,
'password': 'MoreThanEighteenChars',
'project': self.tenant.id,
'role_id': self.roles.first().id,
'confirm_password': 'MoreThanEighteenChars'}
res = self.client.post(USER_CREATE_URL, formData)
self.assertFormError(
res, "form", 'password',
['Password must be between 8 and 18 characters.'])
@test.create_stubs({api.keystone: ('user_get',
'tenant_list',
'user_update_tenant',
'user_update_password',
'user_update',
'roles_for_user', )})
def test_update(self):
user = self.users.get(id="1")
test_password = 'normalpwd'
api.keystone.user_get(IsA(http.HttpRequest), '1',
admin=True).AndReturn(user)
api.keystone.tenant_list(IgnoreArg(), user=user.id) \
.AndReturn([self.tenants.list(), False])
api.keystone.user_update(IsA(http.HttpRequest),
user.id,
email=u'test@example.com',
name=u'test_user',
password=test_password,
project=self.tenant.id).AndReturn(None)
self.mox.ReplayAll()
formData = {'method': 'UpdateUserForm',
'id': user.id,
'name': user.name,
'email': user.email,
'password': test_password,
'project': self.tenant.id,
'confirm_password': test_password}
res = self.client.post(USER_UPDATE_URL, formData)
self.assertNoFormErrors(res)
@test.create_stubs({api.keystone: ('user_get',
'tenant_list',
'user_update_tenant',
'keystone_can_edit_user',
'roles_for_user', )})
def test_update_with_keystone_can_edit_user_false(self):
user = self.users.get(id="1")
api.keystone.user_get(IsA(http.HttpRequest),
'1',
admin=True).AndReturn(user)
api.keystone.tenant_list(IgnoreArg(), user=user.id) \
.AndReturn([self.tenants.list(), False])
api.keystone.keystone_can_edit_user().AndReturn(False)
api.keystone.keystone_can_edit_user().AndReturn(False)
self.mox.ReplayAll()
formData = {'method': 'UpdateUserForm',
'id': user.id,
'name': user.name,
'project': self.tenant.id, }
res = self.client.post(USER_UPDATE_URL, formData)
self.assertNoFormErrors(res)
self.assertMessageCount(error=1)
@test.create_stubs({api.keystone: ('user_get', 'tenant_list')})
def test_update_validation_for_password_too_short(self):
user = self.users.get(id="1")
api.keystone.user_get(IsA(http.HttpRequest), '1',
admin=True).AndReturn(user)
api.keystone.tenant_list(IgnoreArg(), user=user.id) \
.AndReturn([self.tenants.list(), False])
self.mox.ReplayAll()
formData = {'method': 'UpdateUserForm',
'id': user.id,
'name': user.name,
'email': user.email,
'password': 't',
'project': self.tenant.id,
'confirm_password': 't'}
res = self.client.post(USER_UPDATE_URL, formData)
self.assertFormError(
res, "form", 'password',
['Password must be between 8 and 18 characters.'])
@test.create_stubs({api.keystone: ('user_get', 'tenant_list')})
def test_update_validation_for_password_too_long(self):
user = self.users.get(id="1")
api.keystone.user_get(IsA(http.HttpRequest), '1',
admin=True).AndReturn(user)
api.keystone.tenant_list(IgnoreArg(), user=user.id) \
.AndReturn([self.tenants.list(), False])
self.mox.ReplayAll()
formData = {'method': 'UpdateUserForm',
'id': user.id,
'name': user.name,
'email': user.email,
'password': 'ThisIsASuperLongPassword',
'project': self.tenant.id,
'confirm_password': 'ThisIsASuperLongPassword'}
res = self.client.post(USER_UPDATE_URL, formData)
self.assertFormError(
res, "form", 'password',
['Password must be between 8 and 18 characters.'])
@test.create_stubs({api.keystone: ('user_update_enabled', 'user_list')})
def test_enable_user(self):
user = self.users.get(id="2")
domain_id = self._get_domain_id()
users = self._get_users(domain_id)
user.enabled = False
api.keystone.user_list(IgnoreArg(), domain=domain_id).AndReturn(users)
api.keystone.user_update_enabled(IgnoreArg(),
user.id,
True).AndReturn(user)
self.mox.ReplayAll()
formData = {'action': 'users__toggle__%s' % user.id}
res = self.client.post(USERS_INDEX_URL, formData)
self.assertRedirectsNoFollow(res, USERS_INDEX_URL)
@test.create_stubs({api.keystone: ('user_update_enabled', 'user_list')})
def test_disable_user(self):
user = self.users.get(id="2")
domain_id = self._get_domain_id()
users = self._get_users(domain_id)
self.assertTrue(user.enabled)
api.keystone.user_list(IgnoreArg(), domain=domain_id) \
.AndReturn(users)
api.keystone.user_update_enabled(IgnoreArg(),
user.id,
False).AndReturn(user)
self.mox.ReplayAll()
formData = {'action': 'users__toggle__%s' % user.id}
res = self.client.post(USERS_INDEX_URL, formData)
self.assertRedirectsNoFollow(res, USERS_INDEX_URL)
@test.create_stubs({api.keystone: ('user_update_enabled', 'user_list')})
def test_enable_disable_user_exception(self):
user = self.users.get(id="2")
domain_id = self._get_domain_id()
users = self._get_users(domain_id)
user.enabled = False
api.keystone.user_list(IgnoreArg(), domain=domain_id) \
.AndReturn(users)
api.keystone.user_update_enabled(IgnoreArg(), user.id, True) \
.AndRaise(self.exceptions.keystone)
self.mox.ReplayAll()
formData = {'action': 'users__toggle__%s' % user.id}
res = self.client.post(USERS_INDEX_URL, formData)
self.assertRedirectsNoFollow(res, USERS_INDEX_URL)
@test.create_stubs({api.keystone: ('user_list',)})
def test_disabling_current_user(self):
domain_id = self._get_domain_id()
users = self._get_users(domain_id)
for i in range(0, 2):
api.keystone.user_list(IgnoreArg(), domain=domain_id) \
.AndReturn(users)
self.mox.ReplayAll()
formData = {'action': 'users__toggle__%s' % self.request.user.id}
res = self.client.post(USERS_INDEX_URL, formData, follow=True)
self.assertEqual(list(res.context['messages'])[0].message,
u'You cannot disable the user you are currently '
u'logged in as.')
@test.create_stubs({api.keystone: ('user_list',)})
def test_delete_user_with_improper_permissions(self):
domain_id = self._get_domain_id()
users = self._get_users(domain_id)
for i in range(0, 2):
api.keystone.user_list(IgnoreArg(), domain=domain_id) \
.AndReturn(users)
self.mox.ReplayAll()
formData = {'action': 'users__delete__%s' % self.request.user.id}
res = self.client.post(USERS_INDEX_URL, formData, follow=True)
self.assertEqual(list(res.context['messages'])[0].message,
u'You do not have permission to delete user: %s'
% self.request.user.username)
class SeleniumTests(test.SeleniumAdminTestCase):
@test.create_stubs({api.keystone: ('tenant_list',
'get_default_role',
'role_list',
'user_list')})
def test_modal_create_user_with_passwords_not_matching(self):
api.keystone.tenant_list(IgnoreArg(), user=None) \
.AndReturn([self.tenants.list(), False])
api.keystone.role_list(IgnoreArg()).AndReturn(self.roles.list())
api.keystone.user_list(IgnoreArg(), domain=None) \
.AndReturn(self.users.list())
api.keystone.get_default_role(IgnoreArg()) \
.AndReturn(self.roles.first())
self.mox.ReplayAll()
self.selenium.get("%s%s" % (self.live_server_url, USERS_INDEX_URL))
# Open the modal menu
self.selenium.find_element_by_id("users__action_create") \
.send_keys("\n")
wait = self.ui.WebDriverWait(self.selenium, 10,
ignored_exceptions=[socket_timeout])
wait.until(lambda x: self.selenium.find_element_by_id("id_name"))
body = self.selenium.find_element_by_tag_name("body")
self.assertFalse("Passwords do not match" in body.text,
"Error message should not be visible at loading time")
self.selenium.find_element_by_id("id_name").send_keys("Test User")
self.selenium.find_element_by_id("id_password").send_keys("test")
self.selenium.find_element_by_id("id_confirm_password").send_keys("te")
self.selenium.find_element_by_id("id_email").send_keys("a@b.com")
body = self.selenium.find_element_by_tag_name("body")
self.assertTrue("Passwords do not match" in body.text,
"Error message not found in body")
@test.create_stubs({api.keystone: ('tenant_list', 'user_get')})
def test_update_user_with_passwords_not_matching(self):
api.keystone.user_get(IsA(http.HttpRequest), '1',
admin=True).AndReturn(self.user)
api.keystone.tenant_list(IgnoreArg(), user=self.user.id) \
.AndReturn([self.tenants.list(), False])
self.mox.ReplayAll()
self.selenium.get("%s%s" % (self.live_server_url, USER_UPDATE_URL))
body = self.selenium.find_element_by_tag_name("body")
self.assertFalse("Passwords do not match" in body.text,
"Error message should not be visible at loading time")
self.selenium.find_element_by_id("id_password").send_keys("test")
self.selenium.find_element_by_id("id_confirm_password").send_keys("te")
self.selenium.find_element_by_id("id_email").clear()
body = self.selenium.find_element_by_tag_name("body")
self.assertTrue("Passwords do not match" in body.text,
"Error message not found in body")
|
{
"content_hash": "10410528774711f82e30bcfc99cd3837",
"timestamp": "",
"source": "github",
"line_count": 442,
"max_line_length": 79,
"avg_line_length": 40.5316742081448,
"alnum_prop": 0.537259279933017,
"repo_name": "deepakselvaraj/federated-horizon",
"id": "97a6cf3ddf740cd409094ca050a2fe5879c537f1",
"size": "18724",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "openstack_dashboard/dashboards/admin/users/tests.py",
"mode": "33188",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
}
|
import pygame
from pygame.locals import *
class Viewport(pygame.sprite.Sprite):
def __init__(self, parent, viewport_rect, canvas_w, canvas_h):
super(Viewport, self).__init__()
self.parent = parent
self.resize(viewport_rect)
self.canvas = pygame.Surface((canvas_w, canvas_h)).convert()
self.drag_offset = [0, 0]
def resize(self, viewport_rect):
'''Resize the viewport'''
self.image = pygame.Surface((viewport_rect.w, viewport_rect.h)).convert()
self.rect = viewport_rect
@property
def canvas_w(self):
return self.canvas.get_width()
@canvas_w.setter
def canvas_w(self, val):
self.canvas = pygame.Surface((val, self.canvas.get_height())).convert()
@property
def canvas_h(self):
return self.canvas.get_height()
@canvas_h.setter
def canvas_h(self, val):
self.canvas = pygame.Surface((self.canvas.get_width(), val)).convert()
def ondrag(self, rel):
size = self.canvas_w, self.canvas_h
for i in (0, 1): # x, y
self.drag_offset[i] = self.drag_offset[i] + rel[i]
if self.drag_offset[i] > 0:
self.drag_offset[i] = 0
if self.drag_offset[i] < - size[i] + self.rect.size[i]:
                self.drag_offset[i] = - size[i] + self.rect.size[i]
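# Usage sketch: sizes are illustrative; drag deltas would normally come from
# pygame MOUSEMOTION events handled by the owning scene.
#
#     import pygame
#     pygame.init()
#     screen = pygame.display.set_mode((640, 480))
#     view = Viewport(None, pygame.Rect(0, 0, 320, 240), 1024, 768)
#     view.ondrag((-5, -3))   # pan; offsets are clamped to the canvas bounds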
|
{
"content_hash": "77b99e872c866d5e849f8b077a5a8c55",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 81,
"avg_line_length": 32.285714285714285,
"alnum_prop": 0.5811209439528023,
"repo_name": "flexo/evolutron",
"id": "2a7167027501673e8436b9c058cc271bb24d37e3",
"size": "1356",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "evolutron/viewport.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "76959"
},
{
"name": "Shell",
"bytes": "263"
}
],
"symlink_target": ""
}
|
import sys, time, mysql.connector, geopy.point, geopy.distance, json
from geopy.point import Point
class SettingDB:
def __init__(self, uname, pw, table_name="setting", db_name="doublefault"):
self.table_name = table_name
self.db_name = db_name
self.db_user = uname
self.db_pw = pw
self.db = None
pass
def open_db(self):
self.db = mysql.connector.connect(user=self.db_user, password=self.db_pw, database=self.db_name)
pass
def close_db(self):
self.db.close()
self.db = None
pass
def drop_table(self):
self.open_db()
dbh = self.db.cursor()
dbh.execute("drop table if exists %s" % self.table_name)
dbh.close()
self.db.commit()
self.close_db()
pass
def create_table(self):
self.open_db()
dbh = self.db.cursor()
sql = '''
create table if not exists {table}
( settingid int auto_increment primary key,
discordid char(64) not null unique,
pm_channel char(32) not null unique,
on_off char(1) not null default 'y',
address varchar(255),
coordinates char(64),
distance int not null default 1500
) engine=MyISAM'''.format(table=self.table_name)
dbh.execute(sql)
dbh.close()
self.db.commit()
self.close_db()
pass
def put_user_data(self, user):
self.open_db()
dbh = self.db.cursor()
query = "insert into {table} (discordid, pm_channel, on_off, address, coordinates, distance) values ".format(table=self.table_name)
values = "('{u.discordid}', '{u.pm_channel}', '{u.on_off}', '{u.address}', '{u.coordinates.latitude},{u.coordinates.longitude}', {u.distance})".format(u=user)
update = " on duplicate key update on_off='{u.on_off}', address='{u.address}', coordinates='{u.coordinates.latitude},{u.coordinates.longitude}', distance='{u.distance}'".format(u=user)
sql = query + values + update
dbh.execute(sql)
sql2 = "select settingid from {table} where discordid = '{u.discordid}'".format(u=user, table=self.table_name)
dbh.execute(sql2)
row = dbh.fetchone()
        if row is not None:
user.surrogateid = row[0]
pass
self.db.commit()
dbh.close()
self.close_db()
pass
def set_enable(self, discordid, on_off):
self.open_db()
dbh = self.db.cursor()
sql = "update {table} set on_off = '{on_off}' where discordid = '{discordid}'".format(table=self.table_name,
on_off=on_off,
discordid=discordid)
dbh.execute(sql)
self.db.commit()
dbh.close()
self.close_db()
pass
def purge(self, discordid):
self.open_db()
dbh = self.db.cursor()
sql = "delete from {table} where discordid = '{discordid}'".format(table=self.table_name, discordid=discordid)
dbh.execute(sql)
self.db.commit()
dbh.close()
self.close_db()
pass
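    def find_user(self, discordid):
        # Sketch: test() below calls find_user(), which is missing from this
        # file. This minimal version assumes Subscriber(discordid,
        # address=...) re-derives coordinates from the stored address, and
        # mirrors the surrogateid handling in put_user_data().
        self.open_db()
        dbh = self.db.cursor()
        sql = "select settingid, address from {table} where discordid = '{discordid}'".format(table=self.table_name, discordid=discordid)
        dbh.execute(sql)
        row = dbh.fetchone()
        dbh.close()
        self.close_db()
        if row is None:
            return None
        from subscriber import Subscriber
        user = Subscriber(discordid, address=row[1])
        user.surrogateid = row[0]
        return user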
pass
def test():
from subscriber import Subscriber
account = json.load(open("/var/lib/doublefault/account.json"))
db = SettingDB(account["db-username"], account["db-password"], table_name="test")
db.drop_table()
db.create_table()
db.put_user_data(Subscriber("user1", address="10.0001,10.0001"))
db.put_user_data(Subscriber("user2", address="10.0002,10.0002"))
db.put_user_data(Subscriber("user3", address="10.0003,10.0003"))
db.put_user_data(Subscriber("user4", address="10.0004,10.0004"))
db.put_user_data(Subscriber("user5", address="37 Wellington St., Arlington MA"))
subsc = db.find_user("user1")
print ("ID: {u.surrogateid} address: {u.address}, coord: {u.coordinates.latitude},{u.coordinates.longitude}".format(u=subsc))
db.put_user_data(Subscriber("user1", address="20.0001,20.0001"))
for user in ["user1", "user2", "user3", "user4", "user5", "user6"]:
subsc = db.find_user(user)
if subsc:
print ("ID: {u.surrogateid} address: {u.address}, coord: {u.coordinates.latitude},{u.coordinates.longitude}".format(u=subsc))
pass
else:
print ("no %s" % user)
pass
pass
pass
if __name__ == "__main__":
test()
pass
|
{
"content_hash": "8f7a5b10e4ed898f658be10866cd7964",
"timestamp": "",
"source": "github",
"line_count": 139,
"max_line_length": 192,
"avg_line_length": 32.44604316546763,
"alnum_prop": 0.5674057649667406,
"repo_name": "AimForTheAce/BostonMysticsBot",
"id": "929c9b4992d896878de3a55348b66150bef4d162",
"size": "4511",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "settingdb.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "482"
},
{
"name": "Python",
"bytes": "72702"
}
],
"symlink_target": ""
}
|
"""
Accessors for related objects.
When a field defines a relation between two models, each model class provides
an attribute to access related instances of the other model class (unless the
reverse accessor has been disabled with related_name='+').
Accessors are implemented as descriptors in order to customize access and
assignment. This module defines the descriptor classes.
Forward accessors follow foreign keys. Reverse accessors trace them back. For
example, with the following models::
class Parent(Model):
pass
class Child(Model):
parent = ForeignKey(Parent, related_name='children')
``child.parent`` is a forward many-to-one relation. ``parent.children`` is a
reverse many-to-one relation.
There are three types of relations (many-to-one, one-to-one, and many-to-many)
and two directions (forward and reverse) for a total of six combinations.
1. Related instance on the forward side of a many-to-one relation:
``ForwardManyToOneDescriptor``.
Uniqueness of foreign key values is irrelevant to accessing the related
instance, making the many-to-one and one-to-one cases identical as far as
the descriptor is concerned. The constraint is checked upstream (unicity
validation in forms) or downstream (unique indexes in the database).
2. Related instance on the forward side of a one-to-one
relation: ``ForwardOneToOneDescriptor``.
It avoids querying the database when accessing the parent link field in
a multi-table inheritance scenario.
3. Related instance on the reverse side of a one-to-one relation:
``ReverseOneToOneDescriptor``.
One-to-one relations are asymmetrical, despite the apparent symmetry of the
name, because they're implemented in the database with a foreign key from
one table to another. As a consequence ``ReverseOneToOneDescriptor`` is
slightly different from ``ForwardManyToOneDescriptor``.
4. Related objects manager for related instances on the reverse side of a
many-to-one relation: ``ReverseManyToOneDescriptor``.
Unlike the previous two classes, this one provides access to a collection
of objects. It returns a manager rather than an instance.
5. Related objects manager for related instances on the forward or reverse
sides of a many-to-many relation: ``ManyToManyDescriptor``.
Many-to-many relations are symmetrical. The syntax of Django models
requires declaring them on one side but that's an implementation detail.
They could be declared on the other side without any change in behavior.
Therefore the forward and reverse descriptors can be the same.
If you're looking for ``ForwardManyToManyDescriptor`` or
``ReverseManyToManyDescriptor``, use ``ManyToManyDescriptor`` instead.
"""
from operator import attrgetter
from django.db import connections, router, transaction
from django.db.models import Q, signals
from django.db.models.query import QuerySet
from django.utils.functional import cached_property
class ForwardManyToOneDescriptor:
"""
Accessor to the related object on the forward side of a many-to-one or
one-to-one (via ForwardOneToOneDescriptor subclass) relation.
In the example::
class Child(Model):
parent = ForeignKey(Parent, related_name='children')
``child.parent`` is a ``ForwardManyToOneDescriptor`` instance.
"""
def __init__(self, field_with_rel):
self.field = field_with_rel
self.cache_name = self.field.get_cache_name()
@cached_property
def RelatedObjectDoesNotExist(self):
# The exception can't be created at initialization time since the
# related model might not be resolved yet; `rel.model` might still be
# a string model reference.
return type(
'RelatedObjectDoesNotExist',
(self.field.remote_field.model.DoesNotExist, AttributeError),
{}
)
def is_cached(self, instance):
return hasattr(instance, self.cache_name)
def get_queryset(self, **hints):
return self.field.remote_field.model._base_manager.db_manager(hints=hints).all()
def get_prefetch_queryset(self, instances, queryset=None):
if queryset is None:
queryset = self.get_queryset()
queryset._add_hints(instance=instances[0])
rel_obj_attr = self.field.get_foreign_related_value
instance_attr = self.field.get_local_related_value
instances_dict = {instance_attr(inst): inst for inst in instances}
related_field = self.field.foreign_related_fields[0]
# FIXME: This will need to be revisited when we introduce support for
# composite fields. In the meantime we take this practical approach to
        # solve a regression in 1.6 when the reverse manager is hidden
# (related_name ends with a '+'). Refs #21410.
# The check for len(...) == 1 is a special case that allows the query
# to be join-less and smaller. Refs #21760.
if self.field.remote_field.is_hidden() or len(self.field.foreign_related_fields) == 1:
query = {'%s__in' % related_field.name: set(instance_attr(inst)[0] for inst in instances)}
else:
query = {'%s__in' % self.field.related_query_name(): instances}
queryset = queryset.filter(**query)
# Since we're going to assign directly in the cache,
# we must manage the reverse relation cache manually.
if not self.field.remote_field.multiple:
rel_obj_cache_name = self.field.remote_field.get_cache_name()
for rel_obj in queryset:
instance = instances_dict[rel_obj_attr(rel_obj)]
setattr(rel_obj, rel_obj_cache_name, instance)
return queryset, rel_obj_attr, instance_attr, True, self.cache_name
def get_object(self, instance):
qs = self.get_queryset(instance=instance)
# Assuming the database enforces foreign keys, this won't fail.
return qs.get(self.field.get_reverse_related_filter(instance))
def __get__(self, instance, cls=None):
"""
Get the related instance through the forward relation.
With the example above, when getting ``child.parent``:
- ``self`` is the descriptor managing the ``parent`` attribute
- ``instance`` is the ``child`` instance
- ``cls`` is the ``Child`` class (we don't need it)
"""
if instance is None:
return self
# The related instance is loaded from the database and then cached in
# the attribute defined in self.cache_name. It can also be pre-cached
# by the reverse accessor (ReverseOneToOneDescriptor).
try:
rel_obj = getattr(instance, self.cache_name)
except AttributeError:
val = self.field.get_local_related_value(instance)
if None in val:
rel_obj = None
else:
rel_obj = self.get_object(instance)
# If this is a one-to-one relation, set the reverse accessor
# cache on the related object to the current instance to avoid
# an extra SQL query if it's accessed later on.
if not self.field.remote_field.multiple:
setattr(rel_obj, self.field.remote_field.get_cache_name(), instance)
setattr(instance, self.cache_name, rel_obj)
if rel_obj is None and not self.field.null:
raise self.RelatedObjectDoesNotExist(
"%s has no %s." % (self.field.model.__name__, self.field.name)
)
else:
return rel_obj
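    # Caching sketch (illustrative, reusing the Child/Parent example above):
    #
    #     child = Child.objects.get(pk=1)
    #     child.parent    # one SELECT; the result is stored under self.cache_name
    #     child.parent    # second access reads the cache, no query
    #
    # For one-to-one fields the reverse cache is primed as well, so after
    # ``restaurant.place`` is fetched, ``place.restaurant`` costs no extra query.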
def __set__(self, instance, value):
"""
Set the related instance through the forward relation.
With the example above, when setting ``child.parent = parent``:
- ``self`` is the descriptor managing the ``parent`` attribute
- ``instance`` is the ``child`` instance
- ``value`` is the ``parent`` instance on the right of the equal sign
"""
# An object must be an instance of the related class.
if value is not None and not isinstance(value, self.field.remote_field.model._meta.concrete_model):
raise ValueError(
'Cannot assign "%r": "%s.%s" must be a "%s" instance.' % (
value,
instance._meta.object_name,
self.field.name,
self.field.remote_field.model._meta.object_name,
)
)
elif value is not None:
if instance._state.db is None:
instance._state.db = router.db_for_write(instance.__class__, instance=value)
elif value._state.db is None:
value._state.db = router.db_for_write(value.__class__, instance=instance)
elif value._state.db is not None and instance._state.db is not None:
if not router.allow_relation(value, instance):
raise ValueError('Cannot assign "%r": the current database router prevents this relation.' % value)
# If we're setting the value of a OneToOneField to None, we need to clear
# out the cache on any old related object. Otherwise, deleting the
# previously-related object will also cause this object to be deleted,
# which is wrong.
if value is None:
# Look up the previously-related object, which may still be available
# since we've not yet cleared out the related field.
# Use the cache directly, instead of the accessor; if we haven't
# populated the cache, then we don't care - we're only accessing
# the object to invalidate the accessor cache, so there's no
# need to populate the cache just to expire it again.
related = getattr(instance, self.cache_name, None)
# If we've got an old related object, we need to clear out its
# cache. This cache also might not exist if the related object
# hasn't been accessed yet.
if related is not None:
setattr(related, self.field.remote_field.get_cache_name(), None)
for lh_field, rh_field in self.field.related_fields:
setattr(instance, lh_field.attname, None)
# Set the values of the related field.
else:
for lh_field, rh_field in self.field.related_fields:
setattr(instance, lh_field.attname, getattr(value, rh_field.attname))
# Set the related instance cache used by __get__ to avoid an SQL query
# when accessing the attribute we just set.
setattr(instance, self.cache_name, value)
# If this is a one-to-one relation, set the reverse accessor cache on
# the related object to the current instance to avoid an extra SQL
# query if it's accessed later on.
if value is not None and not self.field.remote_field.multiple:
setattr(value, self.field.remote_field.get_cache_name(), instance)
class ForwardOneToOneDescriptor(ForwardManyToOneDescriptor):
"""
Accessor to the related object on the forward side of a one-to-one relation.
In the example::
class Restaurant(Model):
place = OneToOneField(Place, related_name='restaurant')
``restaurant.place`` is a ``ForwardOneToOneDescriptor`` instance.
"""
def get_object(self, instance):
if self.field.remote_field.parent_link:
deferred = instance.get_deferred_fields()
# Because it's a parent link, all the data is available in the
# instance, so populate the parent model with this data.
rel_model = self.field.remote_field.model
fields = [field.attname for field in rel_model._meta.concrete_fields]
# If any of the related model's fields are deferred, fallback to
# fetching all fields from the related model. This avoids a query
# on the related model for every deferred field.
if not any(field in fields for field in deferred):
kwargs = {field: getattr(instance, field) for field in fields}
return rel_model(**kwargs)
return super(ForwardOneToOneDescriptor, self).get_object(instance)
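# Parent-link sketch (hypothetical multi-table inheritance models): when the
# one-to-one field is the automatic parent link, the child row already carries
# every concrete parent column, so no extra query is needed:
#
#     class Place(models.Model):
#         name = models.CharField(max_length=50)
#     class Restaurant(Place):    # implicit place_ptr = OneToOneField(parent_link=True)
#         serves_pizza = models.BooleanField(default=False)
#
#     restaurant = Restaurant.objects.get(pk=1)
#     restaurant.place_ptr    # built from fields already loaded on ``restaurant``;
#                             # falls back to a query only if parent fields were deferred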
class ReverseOneToOneDescriptor:
"""
Accessor to the related object on the reverse side of a one-to-one
relation.
In the example::
class Restaurant(Model):
place = OneToOneField(Place, related_name='restaurant')
``place.restaurant`` is a ``ReverseOneToOneDescriptor`` instance.
"""
def __init__(self, related):
self.related = related
self.cache_name = related.get_cache_name()
@cached_property
def RelatedObjectDoesNotExist(self):
# The exception isn't created at initialization time for the sake of
# consistency with `ForwardManyToOneDescriptor`.
return type(
'RelatedObjectDoesNotExist',
(self.related.related_model.DoesNotExist, AttributeError),
{}
)
def is_cached(self, instance):
return hasattr(instance, self.cache_name)
def get_queryset(self, **hints):
return self.related.related_model._base_manager.db_manager(hints=hints).all()
def get_prefetch_queryset(self, instances, queryset=None):
if queryset is None:
queryset = self.get_queryset()
queryset._add_hints(instance=instances[0])
rel_obj_attr = attrgetter(self.related.field.attname)
def instance_attr(obj):
return obj._get_pk_val()
instances_dict = {instance_attr(inst): inst for inst in instances}
query = {'%s__in' % self.related.field.name: instances}
queryset = queryset.filter(**query)
# Since we're going to assign directly in the cache,
# we must manage the reverse relation cache manually.
rel_obj_cache_name = self.related.field.get_cache_name()
for rel_obj in queryset:
instance = instances_dict[rel_obj_attr(rel_obj)]
setattr(rel_obj, rel_obj_cache_name, instance)
return queryset, rel_obj_attr, instance_attr, True, self.cache_name
def __get__(self, instance, cls=None):
"""
Get the related instance through the reverse relation.
With the example above, when getting ``place.restaurant``:
- ``self`` is the descriptor managing the ``restaurant`` attribute
- ``instance`` is the ``place`` instance
- ``cls`` is the ``Place`` class (unused)
Keep in mind that ``Restaurant`` holds the foreign key to ``Place``.
"""
if instance is None:
return self
# The related instance is loaded from the database and then cached in
# the attribute defined in self.cache_name. It can also be pre-cached
# by the forward accessor (ForwardManyToOneDescriptor).
try:
rel_obj = getattr(instance, self.cache_name)
except AttributeError:
related_pk = instance._get_pk_val()
if related_pk is None:
rel_obj = None
else:
filter_args = self.related.field.get_forward_related_filter(instance)
try:
rel_obj = self.get_queryset(instance=instance).get(**filter_args)
except self.related.related_model.DoesNotExist:
rel_obj = None
else:
# Set the forward accessor cache on the related object to
# the current instance to avoid an extra SQL query if it's
# accessed later on.
setattr(rel_obj, self.related.field.get_cache_name(), instance)
setattr(instance, self.cache_name, rel_obj)
if rel_obj is None:
raise self.RelatedObjectDoesNotExist(
"%s has no %s." % (
instance.__class__.__name__,
self.related.get_accessor_name()
)
)
else:
return rel_obj
def __set__(self, instance, value):
"""
Set the related instance through the reverse relation.
With the example above, when setting ``place.restaurant = restaurant``:
- ``self`` is the descriptor managing the ``restaurant`` attribute
- ``instance`` is the ``place`` instance
- ``value`` is the ``restaurant`` instance on the right of the equal sign
Keep in mind that ``Restaurant`` holds the foreign key to ``Place``.
"""
# The similarity of the code below to the code in
# ForwardManyToOneDescriptor is annoying, but there's a bunch
# of small differences that would make a common base class convoluted.
if value is None:
# Update the cached related instance (if any) & clear the cache.
try:
rel_obj = getattr(instance, self.cache_name)
except AttributeError:
pass
else:
delattr(instance, self.cache_name)
setattr(rel_obj, self.related.field.name, None)
elif not isinstance(value, self.related.related_model):
# An object must be an instance of the related class.
raise ValueError(
'Cannot assign "%r": "%s.%s" must be a "%s" instance.' % (
value,
instance._meta.object_name,
self.related.get_accessor_name(),
self.related.related_model._meta.object_name,
)
)
else:
if instance._state.db is None:
instance._state.db = router.db_for_write(instance.__class__, instance=value)
elif value._state.db is None:
value._state.db = router.db_for_write(value.__class__, instance=instance)
elif value._state.db is not None and instance._state.db is not None:
if not router.allow_relation(value, instance):
raise ValueError('Cannot assign "%r": the current database router prevents this relation.' % value)
related_pk = tuple(getattr(instance, field.attname) for field in self.related.field.foreign_related_fields)
# Set the value of the related field to the value of the related object's related field
for index, field in enumerate(self.related.field.local_related_fields):
setattr(value, field.attname, related_pk[index])
# Set the related instance cache used by __get__ to avoid an SQL query
# when accessing the attribute we just set.
setattr(instance, self.cache_name, value)
# Set the forward accessor cache on the related object to the current
# instance to avoid an extra SQL query if it's accessed later on.
setattr(value, self.related.field.get_cache_name(), instance)
class ReverseManyToOneDescriptor:
"""
Accessor to the related objects manager on the reverse side of a
many-to-one relation.
In the example::
class Child(Model):
parent = ForeignKey(Parent, related_name='children')
``parent.children`` is a ``ReverseManyToOneDescriptor`` instance.
Most of the implementation is delegated to a dynamically defined manager
class built by ``create_forward_many_to_many_manager()`` defined below.
"""
def __init__(self, rel):
self.rel = rel
self.field = rel.field
@cached_property
def related_manager_cls(self):
related_model = self.rel.related_model
return create_reverse_many_to_one_manager(
related_model._default_manager.__class__,
self.rel,
)
def __get__(self, instance, cls=None):
"""
Get the related objects through the reverse relation.
With the example above, when getting ``parent.children``:
- ``self`` is the descriptor managing the ``children`` attribute
- ``instance`` is the ``parent`` instance
- ``cls`` is the ``Parent`` class (unused)
"""
if instance is None:
return self
return self.related_manager_cls(instance)
def _get_set_deprecation_msg_params(self):
return (
'reverse side of a related set',
self.rel.get_accessor_name(),
)
def __set__(self, instance, value):
raise TypeError(
'Direct assignment to the %s is prohibited. Use %s.set() instead.'
% self._get_set_deprecation_msg_params(),
)
def create_reverse_many_to_one_manager(superclass, rel):
"""
Create a manager for the reverse side of a many-to-one relation.
This manager subclasses another manager, generally the default manager of
the related model, and adds behaviors specific to many-to-one relations.
"""
class RelatedManager(superclass):
def __init__(self, instance):
super(RelatedManager, self).__init__()
self.instance = instance
self.model = rel.related_model
self.field = rel.field
self.core_filters = {self.field.name: instance}
def __call__(self, **kwargs):
# We use **kwargs rather than a kwarg argument to enforce the
# `manager='manager_name'` syntax.
manager = getattr(self.model, kwargs.pop('manager'))
manager_class = create_reverse_many_to_one_manager(manager.__class__, rel)
return manager_class(self.instance)
do_not_call_in_templates = True
def _apply_rel_filters(self, queryset):
"""
Filter the queryset for the instance this manager is bound to.
"""
db = self._db or router.db_for_read(self.model, instance=self.instance)
empty_strings_as_null = connections[db].features.interprets_empty_strings_as_nulls
queryset._add_hints(instance=self.instance)
if self._db:
queryset = queryset.using(self._db)
queryset = queryset.filter(**self.core_filters)
for field in self.field.foreign_related_fields:
val = getattr(self.instance, field.attname)
if val is None or (val == '' and empty_strings_as_null):
return queryset.none()
queryset._known_related_objects = {self.field: {self.instance.pk: self.instance}}
return queryset
def _remove_prefetched_objects(self):
try:
self.instance._prefetched_objects_cache.pop(self.field.related_query_name())
except (AttributeError, KeyError):
pass # nothing to clear from cache
def get_queryset(self):
try:
return self.instance._prefetched_objects_cache[self.field.related_query_name()]
except (AttributeError, KeyError):
queryset = super(RelatedManager, self).get_queryset()
return self._apply_rel_filters(queryset)
def get_prefetch_queryset(self, instances, queryset=None):
if queryset is None:
queryset = super(RelatedManager, self).get_queryset()
queryset._add_hints(instance=instances[0])
queryset = queryset.using(queryset._db or self._db)
rel_obj_attr = self.field.get_local_related_value
instance_attr = self.field.get_foreign_related_value
instances_dict = {instance_attr(inst): inst for inst in instances}
query = {'%s__in' % self.field.name: instances}
queryset = queryset.filter(**query)
# Since we just bypassed this class' get_queryset(), we must manage
# the reverse relation manually.
for rel_obj in queryset:
instance = instances_dict[rel_obj_attr(rel_obj)]
setattr(rel_obj, self.field.name, instance)
cache_name = self.field.related_query_name()
return queryset, rel_obj_attr, instance_attr, False, cache_name
def add(self, *objs, **kwargs):
self._remove_prefetched_objects()
bulk = kwargs.pop('bulk', True)
objs = list(objs)
db = router.db_for_write(self.model, instance=self.instance)
def check_and_update_obj(obj):
if not isinstance(obj, self.model):
raise TypeError("'%s' instance expected, got %r" % (
self.model._meta.object_name, obj,
))
setattr(obj, self.field.name, self.instance)
if bulk:
pks = []
for obj in objs:
check_and_update_obj(obj)
if obj._state.adding or obj._state.db != db:
raise ValueError(
"%r instance isn't saved. Use bulk=False or save "
"the object first." % obj
)
pks.append(obj.pk)
self.model._base_manager.using(db).filter(pk__in=pks).update(**{
self.field.name: self.instance,
})
else:
with transaction.atomic(using=db, savepoint=False):
for obj in objs:
check_and_update_obj(obj)
obj.save()
add.alters_data = True
def create(self, **kwargs):
kwargs[self.field.name] = self.instance
db = router.db_for_write(self.model, instance=self.instance)
return super(RelatedManager, self.db_manager(db)).create(**kwargs)
create.alters_data = True
def get_or_create(self, **kwargs):
kwargs[self.field.name] = self.instance
db = router.db_for_write(self.model, instance=self.instance)
return super(RelatedManager, self.db_manager(db)).get_or_create(**kwargs)
get_or_create.alters_data = True
def update_or_create(self, **kwargs):
kwargs[self.field.name] = self.instance
db = router.db_for_write(self.model, instance=self.instance)
return super(RelatedManager, self.db_manager(db)).update_or_create(**kwargs)
update_or_create.alters_data = True
# remove() and clear() are only provided if the ForeignKey can have a value of null.
if rel.field.null:
def remove(self, *objs, **kwargs):
if not objs:
return
bulk = kwargs.pop('bulk', True)
val = self.field.get_foreign_related_value(self.instance)
old_ids = set()
for obj in objs:
# Is obj actually part of this descriptor set?
if self.field.get_local_related_value(obj) == val:
old_ids.add(obj.pk)
else:
raise self.field.remote_field.model.DoesNotExist(
"%r is not related to %r." % (obj, self.instance)
)
self._clear(self.filter(pk__in=old_ids), bulk)
remove.alters_data = True
def clear(self, **kwargs):
bulk = kwargs.pop('bulk', True)
self._clear(self, bulk)
clear.alters_data = True
def _clear(self, queryset, bulk):
self._remove_prefetched_objects()
db = router.db_for_write(self.model, instance=self.instance)
queryset = queryset.using(db)
if bulk:
# `QuerySet.update()` is intrinsically atomic.
queryset.update(**{self.field.name: None})
else:
with transaction.atomic(using=db, savepoint=False):
for obj in queryset:
setattr(obj, self.field.name, None)
obj.save(update_fields=[self.field.name])
_clear.alters_data = True
def set(self, objs, **kwargs):
# Force evaluation of `objs` in case it's a queryset whose value
# could be affected by `manager.clear()`. Refs #19816.
objs = tuple(objs)
bulk = kwargs.pop('bulk', True)
clear = kwargs.pop('clear', False)
if self.field.null:
db = router.db_for_write(self.model, instance=self.instance)
with transaction.atomic(using=db, savepoint=False):
if clear:
self.clear()
self.add(*objs, bulk=bulk)
else:
old_objs = set(self.using(db).all())
new_objs = []
for obj in objs:
if obj in old_objs:
old_objs.remove(obj)
else:
new_objs.append(obj)
self.remove(*old_objs, bulk=bulk)
self.add(*new_objs, bulk=bulk)
else:
self.add(*objs, bulk=bulk)
set.alters_data = True
return RelatedManager
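# Usage sketch for the reverse manager (Parent/Child example from above;
# the field names are hypothetical):
#
#     parent.children.all()                    # children filtered to this parent
#     parent.children.add(child)               # bulk UPDATE of the FK column by default
#     parent.children.add(child, bulk=False)   # per-object save() instead
#     parent.children.set([c1, c2])            # diffs against the current set
#
# remove() and clear() are attached only when the ForeignKey is nullable,
# since both work by setting the FK to None.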
class ManyToManyDescriptor(ReverseManyToOneDescriptor):
"""
Accessor to the related objects manager on the forward and reverse sides of
a many-to-many relation.
In the example::
class Pizza(Model):
toppings = ManyToManyField(Topping, related_name='pizzas')
``pizza.toppings`` and ``topping.pizzas`` are ``ManyToManyDescriptor``
instances.
Most of the implementation is delegated to a dynamically defined manager
class built by ``create_forward_many_to_many_manager()`` defined below.
"""
def __init__(self, rel, reverse=False):
super(ManyToManyDescriptor, self).__init__(rel)
self.reverse = reverse
@property
def through(self):
# through is provided so that you have easy access to the through
# model (Book.authors.through) for inlines, etc. This is done as
# a property to ensure that the fully resolved value is returned.
return self.rel.through
@cached_property
def related_manager_cls(self):
related_model = self.rel.related_model if self.reverse else self.rel.model
return create_forward_many_to_many_manager(
related_model._default_manager.__class__,
self.rel,
reverse=self.reverse,
)
def _get_set_deprecation_msg_params(self):
return (
'%s side of a many-to-many set' % ('reverse' if self.reverse else 'forward'),
self.rel.get_accessor_name() if self.reverse else self.field.name,
)
def create_forward_many_to_many_manager(superclass, rel, reverse):
"""
    Create a manager for either side of a many-to-many relation.
This manager subclasses another manager, generally the default manager of
the related model, and adds behaviors specific to many-to-many relations.
"""
class ManyRelatedManager(superclass):
def __init__(self, instance=None):
super(ManyRelatedManager, self).__init__()
self.instance = instance
if not reverse:
self.model = rel.model
self.query_field_name = rel.field.related_query_name()
self.prefetch_cache_name = rel.field.name
self.source_field_name = rel.field.m2m_field_name()
self.target_field_name = rel.field.m2m_reverse_field_name()
self.symmetrical = rel.symmetrical
else:
self.model = rel.related_model
self.query_field_name = rel.field.name
self.prefetch_cache_name = rel.field.related_query_name()
self.source_field_name = rel.field.m2m_reverse_field_name()
self.target_field_name = rel.field.m2m_field_name()
self.symmetrical = False
self.through = rel.through
self.reverse = reverse
self.source_field = self.through._meta.get_field(self.source_field_name)
self.target_field = self.through._meta.get_field(self.target_field_name)
self.core_filters = {}
self.pk_field_names = {}
for lh_field, rh_field in self.source_field.related_fields:
core_filter_key = '%s__%s' % (self.query_field_name, rh_field.name)
self.core_filters[core_filter_key] = getattr(instance, rh_field.attname)
self.pk_field_names[lh_field.name] = rh_field.name
self.related_val = self.source_field.get_foreign_related_value(instance)
if None in self.related_val:
raise ValueError('"%r" needs to have a value for field "%s" before '
'this many-to-many relationship can be used.' %
(instance, self.pk_field_names[self.source_field_name]))
            # Even if this relation is not to pk, we still require a pk value.
            # The expectation is that the instance has already been saved to the
            # DB, although having a pk value isn't a guarantee of that.
if instance.pk is None:
raise ValueError("%r instance needs to have a primary key value before "
"a many-to-many relationship can be used." %
instance.__class__.__name__)
def __call__(self, **kwargs):
# We use **kwargs rather than a kwarg argument to enforce the
# `manager='manager_name'` syntax.
manager = getattr(self.model, kwargs.pop('manager'))
manager_class = create_forward_many_to_many_manager(manager.__class__, rel, reverse)
return manager_class(instance=self.instance)
do_not_call_in_templates = True
def _build_remove_filters(self, removed_vals):
filters = Q(**{self.source_field_name: self.related_val})
# No need to add a subquery condition if removed_vals is a QuerySet without
# filters.
removed_vals_filters = (not isinstance(removed_vals, QuerySet) or
removed_vals._has_filters())
if removed_vals_filters:
filters &= Q(**{'%s__in' % self.target_field_name: removed_vals})
if self.symmetrical:
symmetrical_filters = Q(**{self.target_field_name: self.related_val})
if removed_vals_filters:
symmetrical_filters &= Q(
**{'%s__in' % self.source_field_name: removed_vals})
filters |= symmetrical_filters
return filters
def _apply_rel_filters(self, queryset):
"""
Filter the queryset for the instance this manager is bound to.
"""
queryset._add_hints(instance=self.instance)
if self._db:
queryset = queryset.using(self._db)
return queryset._next_is_sticky().filter(**self.core_filters)
def _remove_prefetched_objects(self):
try:
self.instance._prefetched_objects_cache.pop(self.prefetch_cache_name)
except (AttributeError, KeyError):
pass # nothing to clear from cache
def get_queryset(self):
try:
return self.instance._prefetched_objects_cache[self.prefetch_cache_name]
except (AttributeError, KeyError):
queryset = super(ManyRelatedManager, self).get_queryset()
return self._apply_rel_filters(queryset)
def get_prefetch_queryset(self, instances, queryset=None):
if queryset is None:
queryset = super(ManyRelatedManager, self).get_queryset()
queryset._add_hints(instance=instances[0])
queryset = queryset.using(queryset._db or self._db)
query = {'%s__in' % self.query_field_name: instances}
queryset = queryset._next_is_sticky().filter(**query)
# M2M: need to annotate the query in order to get the primary model
# that the secondary model was actually related to. We know that
# there will already be a join on the join table, so we can just add
# the select.
# For non-autocreated 'through' models, can't assume we are
# dealing with PK values.
fk = self.through._meta.get_field(self.source_field_name)
join_table = fk.model._meta.db_table
connection = connections[queryset.db]
qn = connection.ops.quote_name
queryset = queryset.extra(select={
'_prefetch_related_val_%s' % f.attname:
'%s.%s' % (qn(join_table), qn(f.column)) for f in fk.local_related_fields})
return (
queryset,
lambda result: tuple(
getattr(result, '_prefetch_related_val_%s' % f.attname)
for f in fk.local_related_fields
),
lambda inst: tuple(
f.get_db_prep_value(getattr(inst, f.attname), connection)
for f in fk.foreign_related_fields
),
False,
self.prefetch_cache_name,
)
def add(self, *objs):
if not rel.through._meta.auto_created:
opts = self.through._meta
raise AttributeError(
"Cannot use add() on a ManyToManyField which specifies an "
"intermediary model. Use %s.%s's Manager instead." %
(opts.app_label, opts.object_name)
)
self._remove_prefetched_objects()
db = router.db_for_write(self.through, instance=self.instance)
with transaction.atomic(using=db, savepoint=False):
self._add_items(self.source_field_name, self.target_field_name, *objs)
# If this is a symmetrical m2m relation to self, add the mirror entry in the m2m table
if self.symmetrical:
self._add_items(self.target_field_name, self.source_field_name, *objs)
add.alters_data = True
def remove(self, *objs):
if not rel.through._meta.auto_created:
opts = self.through._meta
raise AttributeError(
"Cannot use remove() on a ManyToManyField which specifies "
"an intermediary model. Use %s.%s's Manager instead." %
(opts.app_label, opts.object_name)
)
self._remove_prefetched_objects()
self._remove_items(self.source_field_name, self.target_field_name, *objs)
remove.alters_data = True
def clear(self):
db = router.db_for_write(self.through, instance=self.instance)
with transaction.atomic(using=db, savepoint=False):
signals.m2m_changed.send(
sender=self.through, action="pre_clear",
instance=self.instance, reverse=self.reverse,
model=self.model, pk_set=None, using=db,
)
self._remove_prefetched_objects()
filters = self._build_remove_filters(super(ManyRelatedManager, self).get_queryset().using(db))
self.through._default_manager.using(db).filter(filters).delete()
signals.m2m_changed.send(
sender=self.through, action="post_clear",
instance=self.instance, reverse=self.reverse,
model=self.model, pk_set=None, using=db,
)
clear.alters_data = True
def set(self, objs, **kwargs):
if not rel.through._meta.auto_created:
opts = self.through._meta
raise AttributeError(
"Cannot set values on a ManyToManyField which specifies an "
"intermediary model. Use %s.%s's Manager instead." %
(opts.app_label, opts.object_name)
)
# Force evaluation of `objs` in case it's a queryset whose value
# could be affected by `manager.clear()`. Refs #19816.
objs = tuple(objs)
clear = kwargs.pop('clear', False)
db = router.db_for_write(self.through, instance=self.instance)
with transaction.atomic(using=db, savepoint=False):
if clear:
self.clear()
self.add(*objs)
else:
old_ids = set(self.using(db).values_list(self.target_field.target_field.attname, flat=True))
new_objs = []
for obj in objs:
fk_val = (
self.target_field.get_foreign_related_value(obj)[0]
if isinstance(obj, self.model) else obj
)
if fk_val in old_ids:
old_ids.remove(fk_val)
else:
new_objs.append(obj)
self.remove(*old_ids)
self.add(*new_objs)
set.alters_data = True
def create(self, **kwargs):
# This check needs to be done here, since we can't later remove this
# from the method lookup table, as we do with add and remove.
if not self.through._meta.auto_created:
opts = self.through._meta
raise AttributeError(
"Cannot use create() on a ManyToManyField which specifies "
"an intermediary model. Use %s.%s's Manager instead." %
(opts.app_label, opts.object_name)
)
db = router.db_for_write(self.instance.__class__, instance=self.instance)
new_obj = super(ManyRelatedManager, self.db_manager(db)).create(**kwargs)
self.add(new_obj)
return new_obj
create.alters_data = True
def get_or_create(self, **kwargs):
db = router.db_for_write(self.instance.__class__, instance=self.instance)
obj, created = super(ManyRelatedManager, self.db_manager(db)).get_or_create(**kwargs)
# We only need to add() if created because if we got an object back
# from get() then the relationship already exists.
if created:
self.add(obj)
return obj, created
get_or_create.alters_data = True
def update_or_create(self, **kwargs):
db = router.db_for_write(self.instance.__class__, instance=self.instance)
obj, created = super(ManyRelatedManager, self.db_manager(db)).update_or_create(**kwargs)
# We only need to add() if created because if we got an object back
# from get() then the relationship already exists.
if created:
self.add(obj)
return obj, created
update_or_create.alters_data = True
def _add_items(self, source_field_name, target_field_name, *objs):
# source_field_name: the PK fieldname in join table for the source object
# target_field_name: the PK fieldname in join table for the target object
# *objs - objects to add. Either object instances, or primary keys of object instances.
# If there aren't any objects, there is nothing to do.
from django.db.models import Model
if objs:
new_ids = set()
for obj in objs:
if isinstance(obj, self.model):
if not router.allow_relation(obj, self.instance):
raise ValueError(
'Cannot add "%r": instance is on database "%s", value is on database "%s"' %
(obj, self.instance._state.db, obj._state.db)
)
fk_val = self.through._meta.get_field(
target_field_name).get_foreign_related_value(obj)[0]
if fk_val is None:
raise ValueError(
'Cannot add "%r": the value for field "%s" is None' %
(obj, target_field_name)
)
new_ids.add(fk_val)
elif isinstance(obj, Model):
raise TypeError(
"'%s' instance expected, got %r" %
(self.model._meta.object_name, obj)
)
else:
new_ids.add(obj)
db = router.db_for_write(self.through, instance=self.instance)
vals = (self.through._default_manager.using(db)
.values_list(target_field_name, flat=True)
.filter(**{
source_field_name: self.related_val[0],
'%s__in' % target_field_name: new_ids,
}))
new_ids = new_ids - set(vals)
with transaction.atomic(using=db, savepoint=False):
if self.reverse or source_field_name == self.source_field_name:
# Don't send the signal when we are inserting the
# duplicate data row for symmetrical reverse entries.
signals.m2m_changed.send(
sender=self.through, action='pre_add',
instance=self.instance, reverse=self.reverse,
model=self.model, pk_set=new_ids, using=db,
)
# Add the ones that aren't there already
self.through._default_manager.using(db).bulk_create([
self.through(**{
'%s_id' % source_field_name: self.related_val[0],
'%s_id' % target_field_name: obj_id,
})
for obj_id in new_ids
])
if self.reverse or source_field_name == self.source_field_name:
# Don't send the signal when we are inserting the
# duplicate data row for symmetrical reverse entries.
signals.m2m_changed.send(
sender=self.through, action='post_add',
instance=self.instance, reverse=self.reverse,
model=self.model, pk_set=new_ids, using=db,
)
def _remove_items(self, source_field_name, target_field_name, *objs):
# source_field_name: the PK colname in join table for the source object
# target_field_name: the PK colname in join table for the target object
# *objs - objects to remove
if not objs:
return
# Check that all the objects are of the right type
old_ids = set()
for obj in objs:
if isinstance(obj, self.model):
fk_val = self.target_field.get_foreign_related_value(obj)[0]
old_ids.add(fk_val)
else:
old_ids.add(obj)
db = router.db_for_write(self.through, instance=self.instance)
with transaction.atomic(using=db, savepoint=False):
# Send a signal to the other end if need be.
signals.m2m_changed.send(
sender=self.through, action="pre_remove",
instance=self.instance, reverse=self.reverse,
model=self.model, pk_set=old_ids, using=db,
)
target_model_qs = super(ManyRelatedManager, self).get_queryset()
if target_model_qs._has_filters():
old_vals = target_model_qs.using(db).filter(**{
'%s__in' % self.target_field.target_field.attname: old_ids})
else:
old_vals = old_ids
filters = self._build_remove_filters(old_vals)
self.through._default_manager.using(db).filter(filters).delete()
signals.m2m_changed.send(
sender=self.through, action="post_remove",
instance=self.instance, reverse=self.reverse,
model=self.model, pk_set=old_ids, using=db,
)
return ManyRelatedManager
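# Usage sketch for the many-to-many manager (Pizza/Topping example from above):
#
#     pizza.toppings.add(t1, t2)     # inserts only the missing through rows
#     pizza.toppings.remove(t1)      # deletes the matching through rows
#     pizza.toppings.set([t2, t3])   # adds/removes based on a diff of target ids
#     topping.pizzas.all()           # same manager class, built with reverse=True
#
# add(), remove(), set() and create() raise AttributeError when a custom
# (non-auto-created) ``through`` model is declared; use that model's manager.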
|
{
"content_hash": "63bfa929645d97ab8ccef3ae627f9a1a",
"timestamp": "",
"source": "github",
"line_count": 1109,
"max_line_length": 119,
"avg_line_length": 44.12714156898107,
"alnum_prop": 0.5743711302286614,
"repo_name": "twz915/django",
"id": "a878a79acde1179e03f0ca8a46118d546dbd9448",
"size": "48937",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "django/db/models/fields/related_descriptors.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "55929"
},
{
"name": "HTML",
"bytes": "182880"
},
{
"name": "JavaScript",
"bytes": "252645"
},
{
"name": "Makefile",
"bytes": "125"
},
{
"name": "Python",
"bytes": "11852079"
},
{
"name": "Shell",
"bytes": "809"
},
{
"name": "Smarty",
"bytes": "130"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Movie',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=800, unique=True)),
('watched', models.IntegerField(default=1)),
('watched_full', models.BooleanField(default=True)),
('rating', models.IntegerField()),
('source', models.CharField(max_length=500, null=True)),
('imdb_rating', models.IntegerField(null=True)),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
],
),
migrations.CreateModel(
name='MovieType',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('movie_type', models.CharField(choices=[('Act', 'Action'), ('Ani', 'Animation'), ('Com', 'Comedy'), ('Doc', 'Documentary'), ('Fam', 'Family'), ('FN', 'Film-Noir'), ('Hor', 'Horror'), ('Mus', 'Musical'), ('Rom', 'Romance'), ('Spo', 'Sport'), ('War', 'War'), ('Adv', 'Adventure'), ('Bio', 'Biography'), ('Cri', 'Crime'), ('Dra', 'Drama'), ('Fan', 'Fantasy'), ('His', 'History'), ('Msc', 'Music'), ('Mys', 'Mystery'), ('Sci', 'Sci-Fi'), ('Thr', 'Thriller'), ('Wes', 'Western')], max_length=3)),  # 'Msc'/'Wes' assumed: avoids duplicating the 'Mus' key and a value longer than max_length
('movie', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='movie.Movie')),
],
),
migrations.CreateModel(
name='WatchTime',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('watched_at', models.DateField()),
('movie', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='movie.Movie')),
],
),
]
|
{
"content_hash": "1abc4cd557fc873d3310c4f028cd874a",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 512,
"avg_line_length": 49.22222222222222,
"alnum_prop": 0.5471783295711061,
"repo_name": "pyprism/Hiren-Movie",
"id": "a0cf8082889e6ce8522544ea40e62aeed39cccad",
"size": "2288",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "movie/migrations/0001_initial.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "28617"
}
],
"symlink_target": ""
}
|
"""
Copyright (c) 2015 Red Hat, Inc
All rights reserved.
This software may be modified and distributed under the terms
of the BSD license. See the LICENSE file for details.
"""
from __future__ import unicode_literals
from collections import namedtuple
import json
import os
import random
from string import ascii_letters
import subprocess
from tempfile import NamedTemporaryFile
import time
from atomic_reactor import __version__ as atomic_reactor_version
from atomic_reactor import start_time as atomic_reactor_start_time
from atomic_reactor.plugin import ExitPlugin
from atomic_reactor.source import GitSource
from atomic_reactor.plugins.post_rpmqa import PostBuildRPMqaPlugin
from atomic_reactor.plugins.pre_add_filesystem import AddFilesystemPlugin
from atomic_reactor.constants import PROG
from atomic_reactor.util import (get_version_of_tools, get_checksums,
get_build_json, get_preferred_label)
from atomic_reactor.koji_util import create_koji_session, TaskWatcher
from dockerfile_parse import DockerfileParser
from osbs.conf import Configuration
from osbs.api import OSBS
from osbs.exceptions import OsbsException
# An output file and its metadata
Output = namedtuple('Output', ['file', 'metadata'])
class KojiUploadLogger(object):
def __init__(self, logger, notable_percent=10):
self.logger = logger
self.notable_percent = notable_percent
self.last_percent_done = 0
def callback(self, offset, totalsize, size, t1, t2): # pylint: disable=W0613
if offset == 0:
self.logger.debug("upload size: %.1fMiB", totalsize / 1024.0 / 1024)
if not totalsize or not t1:
return
percent_done = 100 * offset / totalsize
if (percent_done >= 99 or
percent_done - self.last_percent_done >= self.notable_percent):
self.last_percent_done = percent_done
self.logger.debug("upload: %d%% done (%.1f MiB/sec)",
percent_done, size / t1 / 1024 / 1024)
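# Illustrative progress trace (made-up sizes; koji itself invokes callback()
# during session.uploadWrapper(), this only shows the throttling behavior):
#
#     upload_logger = KojiUploadLogger(logger, notable_percent=25)
#     upload_logger.callback(0, 100, 10, 1.0, 1.0)    # logs "upload size: ..."
#     upload_logger.callback(10, 100, 10, 1.0, 1.0)   # 10% done, under threshold: silent
#     upload_logger.callback(30, 100, 10, 1.0, 1.0)   # 30% done: logged, marker moves to 30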
class KojiPromotePlugin(ExitPlugin):
"""
Promote this build to Koji
Submits a successful build to Koji using the Content Generator API,
https://fedoraproject.org/wiki/Koji/ContentGenerators
Authentication is with Kerberos unless the koji_ssl_certs
configuration parameter is given, in which case it should be a
path at which 'cert', 'ca', and 'serverca' are the certificates
for SSL authentication.
If Kerberos is used for authentication, the default principal will
be used (from the kernel keyring) unless both koji_keytab and
koji_principal are specified. The koji_keytab parameter is a
keytab name like 'type:name', and so can be used to specify a key
in a Kubernetes secret by specifying 'FILE:/path/to/key'.
If metadata_only is set, the 'docker save' image will not be
uploaded, only the logs. The import will be marked as
metadata-only.
Runs as an exit plugin in order to capture logs from all other
plugins.
"""
key = "koji_promote"
is_allowed_to_fail = False
def __init__(self, tasker, workflow, kojihub, url,
verify_ssl=True, use_auth=True,
koji_ssl_certs=None, koji_proxy_user=None,
koji_principal=None, koji_keytab=None,
metadata_only=False, blocksize=None,
target=None, poll_interval=5):
"""
constructor
:param tasker: DockerTasker instance
:param workflow: DockerBuildWorkflow instance
:param kojihub: string, koji hub (xmlrpc)
:param url: string, URL for OSv3 instance
:param verify_ssl: bool, verify OSv3 SSL certificate?
:param use_auth: bool, initiate authentication with OSv3?
:param koji_ssl_certs: str, path to 'cert', 'ca', 'serverca'
:param koji_proxy_user: str, user to log in as (requires hub config)
:param koji_principal: str, Kerberos principal (must specify keytab)
:param koji_keytab: str, keytab name (must specify principal)
:param metadata_only: bool, whether to omit the 'docker save' image
:param blocksize: int, blocksize to use for uploading files
:param target: str, koji target
:param poll_interval: int, seconds between Koji task status requests
"""
super(KojiPromotePlugin, self).__init__(tasker, workflow)
self.kojihub = kojihub
self.koji_ssl_certs = koji_ssl_certs
self.koji_proxy_user = koji_proxy_user
self.koji_principal = koji_principal
self.koji_keytab = koji_keytab
self.metadata_only = metadata_only
self.blocksize = blocksize
self.target = target
self.poll_interval = poll_interval
self.namespace = get_build_json().get('metadata', {}).get('namespace', None)
osbs_conf = Configuration(conf_file=None, openshift_uri=url,
use_auth=use_auth, verify_ssl=verify_ssl,
namespace=self.namespace)
self.osbs = OSBS(osbs_conf, osbs_conf)
self.build_id = None
self.nvr_image = None
@staticmethod
def parse_rpm_output(output, tags, separator=';'):
"""
Parse output of the rpm query.
:param output: list, decoded output (str) from the rpm subprocess
:param tags: list, str fields used for query output
:return: list, dicts describing each rpm package
"""
def field(tag):
"""
Get a field value by name
"""
try:
value = fields[tags.index(tag)]
except ValueError:
return None
if value == '(none)':
return None
return value
components = []
sigmarker = 'Key ID '
for rpm in output:
fields = rpm.rstrip('\n').split(separator)
if len(fields) < len(tags):
continue
signature = field('SIGPGP:pgpsig') or field('SIGGPG:pgpsig')
if signature:
parts = signature.split(sigmarker, 1)
if len(parts) > 1:
signature = parts[1]
component_rpm = {
'type': 'rpm',
'name': field('NAME'),
'version': field('VERSION'),
'release': field('RELEASE'),
'arch': field('ARCH'),
'sigmd5': field('SIGMD5'),
'signature': signature,
}
# Special handling for epoch as it must be an integer or None
epoch = field('EPOCH')
if epoch is not None:
epoch = int(epoch)
component_rpm['epoch'] = epoch
if component_rpm['name'] != 'gpg-pubkey':
components.append(component_rpm)
return components
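    # Example (hypothetical rpm output line; fields ordered as in get_rpms() below):
    #
    #     line = ("bash;4.2.46;30.el7;x86_64;(none);d41d8cd98f00b204;"
    #             "RSA/SHA256, Mon 01 Jan 2018, Key ID 199e2f91fd431d51;(none)")
    #     KojiPromotePlugin.parse_rpm_output([line], tags)
    #     # -> [{'type': 'rpm', 'name': 'bash', 'version': '4.2.46',
    #     #      'release': '30.el7', 'arch': 'x86_64', 'epoch': None,
    #     #      'sigmd5': 'd41d8cd98f00b204', 'signature': '199e2f91fd431d51'}]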
def get_rpms(self):
"""
Build a list of installed RPMs in the format required for the
metadata.
"""
tags = [
'NAME',
'VERSION',
'RELEASE',
'ARCH',
'EPOCH',
'SIGMD5',
'SIGPGP:pgpsig',
'SIGGPG:pgpsig',
]
sep = ';'
fmt = sep.join(["%%{%s}" % tag for tag in tags])
cmd = "/bin/rpm -qa --qf '{0}\n'".format(fmt)
try:
# py3
(status, output) = subprocess.getstatusoutput(cmd)
except AttributeError:
# py2
with open('/dev/null', 'r+') as devnull:
p = subprocess.Popen(cmd,
shell=True,
stdin=devnull,
stdout=subprocess.PIPE,
stderr=devnull)
(stdout, stderr) = p.communicate()
status = p.wait()
output = stdout.decode()
        if status != 0:
            # getstatusoutput() merges stderr into output; the py2 branch
            # discards stderr, so log the combined output in both cases.
            self.log.debug("%s: output: %s", cmd, output)
            raise RuntimeError("%s: exit code %s" % (cmd, status))
return self.parse_rpm_output(output.splitlines(), tags, separator=sep)
def get_output_metadata(self, path, filename):
"""
Describe a file by its metadata.
:return: dict
"""
checksums = get_checksums(path, ['md5'])
metadata = {'filename': filename,
'filesize': os.path.getsize(path),
'checksum': checksums['md5sum'],
'checksum_type': 'md5'}
if self.metadata_only:
metadata['metadata_only'] = True
return metadata
def get_builder_image_id(self):
"""
Find out the docker ID of the buildroot image we are in.
"""
try:
buildroot_tag = os.environ["OPENSHIFT_CUSTOM_BUILD_BASE_IMAGE"]
except KeyError:
return ''
try:
pod = self.osbs.get_pod_for_build(self.build_id)
all_images = pod.get_container_image_ids()
except OsbsException as ex:
self.log.error("unable to find image id: %r", ex)
return buildroot_tag
try:
return all_images[buildroot_tag]
except KeyError:
self.log.error("Unable to determine buildroot image ID for %s",
buildroot_tag)
return buildroot_tag
def get_buildroot(self, build_id):
"""
Build the buildroot entry of the metadata.
:return: dict, partial metadata
"""
docker_version = self.tasker.get_version()
docker_info = self.tasker.get_info()
host_arch = docker_version['Arch']
if host_arch == 'amd64':
host_arch = 'x86_64'
buildroot = {
'id': 1,
'host': {
'os': docker_info['OperatingSystem'],
'arch': host_arch,
},
'content_generator': {
'name': PROG,
'version': atomic_reactor_version,
},
'container': {
'type': 'docker',
'arch': os.uname()[4],
},
'tools': [
{
'name': tool['name'],
'version': tool['version'],
}
for tool in get_version_of_tools()] + [
{
'name': 'docker',
'version': docker_version['Version'],
},
],
'components': self.get_rpms(),
'extra': {
'osbs': {
'build_id': build_id,
'builder_image_id': self.get_builder_image_id(),
}
},
}
return buildroot
def get_logs(self):
"""
Build the logs entry for the metadata 'output' section
:return: list, Output instances
"""
output = []
# Collect logs from server
try:
logs = self.osbs.get_build_logs(self.build_id)
except OsbsException as ex:
self.log.error("unable to get build logs: %r", ex)
else:
# Deleted once closed
logfile = NamedTemporaryFile(prefix=self.build_id,
suffix=".log",
mode='w')
logfile.write(logs)
logfile.flush()
metadata = self.get_output_metadata(logfile.name,
"openshift-final.log")
output.append(Output(file=logfile, metadata=metadata))
docker_logs = NamedTemporaryFile(prefix="docker-%s" % self.build_id,
suffix=".log",
mode='w')
docker_logs.write("\n".join(self.workflow.build_logs))
docker_logs.flush()
output.append(Output(file=docker_logs,
metadata=self.get_output_metadata(docker_logs.name,
"build.log")))
return output
def get_image_components(self):
"""
Re-package the output of the rpmqa plugin into the format required
for the metadata.
"""
try:
output = self.workflow.postbuild_results[PostBuildRPMqaPlugin.key]
except KeyError:
self.log.error("%s plugin did not run!",
PostBuildRPMqaPlugin.key)
return []
return self.parse_rpm_output(output, PostBuildRPMqaPlugin.rpm_tags,
separator=',')
def get_image_output(self, arch):
"""
Create the output for the image
This is the Koji Content Generator metadata, along with the
'docker save' output to upload.
For metadata-only builds, an empty file is used instead of the
output of 'docker save'.
:param arch: str, architecture for this output
:return: tuple, (metadata dict, Output instance)
"""
image_id = self.workflow.builder.image_id
saved_image = self.workflow.exported_image_sequence[-1].get('path')
ext = saved_image.split('.', 1)[1]
name_fmt = 'docker-image-{id}.{arch}.{ext}'
image_name = name_fmt.format(id=image_id, arch=arch, ext=ext)
if self.metadata_only:
metadata = self.get_output_metadata(os.path.devnull, image_name)
output = Output(file=None, metadata=metadata)
else:
metadata = self.get_output_metadata(saved_image, image_name)
            output = Output(file=open(saved_image, 'rb'), metadata=metadata)
return metadata, output
def get_digests(self):
"""
Returns a map of repositories to digests
"""
digests = {} # repository -> digest
for registry in self.workflow.push_conf.docker_registries:
for image in self.workflow.tag_conf.images:
image_str = image.to_str()
if image_str in registry.digests:
digest = registry.digests[image_str]
digests[image.to_str(registry=False)] = digest
return digests
def get_repositories(self, digests):
"""
Build the repositories metadata
:param digests: dict, repository -> digest
"""
if self.workflow.push_conf.pulp_registries:
# If pulp was used, only report pulp images
registries = self.workflow.push_conf.pulp_registries
else:
# Otherwise report all the images we pushed
registries = self.workflow.push_conf.all_registries
output_images = []
for registry in registries:
image = self.nvr_image.copy()
image.registry = registry.uri
pullspec = image.to_str()
output_images.append(pullspec)
digest = digests.get(image.to_str(registry=False))
if digest:
digest_pullspec = image.to_str(tag=False) + "@" + digest
output_images.append(digest_pullspec)
return output_images
def get_output(self, buildroot_id):
"""
Build the 'output' section of the metadata.
:return: list, Output instances
"""
def add_buildroot_id(output):
logfile, metadata = output
metadata.update({'buildroot_id': buildroot_id})
return Output(file=logfile, metadata=metadata)
def add_log_type(output):
logfile, metadata = output
metadata.update({'type': 'log', 'arch': 'noarch'})
return Output(file=logfile, metadata=metadata)
output_files = [add_log_type(add_buildroot_id(metadata))
for metadata in self.get_logs()]
# Parent of squashed built image is base image
image_id = self.workflow.builder.image_id
parent_id = self.workflow.base_image_inspect['Id']
digests = self.get_digests()
repositories = self.get_repositories(digests)
arch = os.uname()[4]
metadata, output = self.get_image_output(arch)
metadata.update({
'arch': arch,
'type': 'docker-image',
'components': self.get_image_components(),
'extra': {
'image': {
'arch': arch,
},
'docker': {
'id': image_id,
'parent_id': parent_id,
'repositories': repositories,
},
},
})
# Add the 'docker save' image to the output
image = add_buildroot_id(output)
output_files.append(image)
return output_files
def get_build(self, metadata):
start_time = int(atomic_reactor_start_time)
labels = DockerfileParser(self.workflow.builder.df_path).labels
component = get_preferred_label(labels, 'com.redhat.component')
version = get_preferred_label(labels, 'version')
release = get_preferred_label(labels, 'release')
source = self.workflow.source
if not isinstance(source, GitSource):
raise RuntimeError('git source required')
extra = {'image': {}}
koji_task_id = metadata.get('labels', {}).get('koji-task-id')
if koji_task_id is not None:
self.log.info("build configuration created by Koji Task ID %s",
koji_task_id)
extra['container_koji_task_id'] = koji_task_id
fs_result = self.workflow.prebuild_results.get(AddFilesystemPlugin.key)
if fs_result is not None:
try:
task_id = fs_result['filesystem-koji-task-id']
except KeyError:
self.log.error("%s: expected filesystem-koji-task-id in result",
AddFilesystemPlugin.key)
else:
extra['filesystem_koji_task_id'] = str(task_id)
build = {
'name': component,
'version': version,
'release': release,
'source': "{0}#{1}".format(source.uri, source.commit_id),
'start_time': start_time,
'end_time': int(time.time()),
'extra': extra,
}
if self.metadata_only:
build['metadata_only'] = True
return build
def get_metadata(self):
"""
Build the metadata needed for importing the build
:return: tuple, the metadata and the list of Output instances
"""
try:
metadata = get_build_json()["metadata"]
self.build_id = metadata["name"]
except KeyError:
self.log.error("No build metadata")
raise
for image in self.workflow.tag_conf.primary_images:
            # dash at first/last position does not count
if '-' in image.tag[1:-1]:
self.nvr_image = image
break
else:
raise RuntimeError('Unable to determine name:version-release')
metadata_version = 0
build = self.get_build(metadata)
buildroot = self.get_buildroot(build_id=self.build_id)
output_files = self.get_output(buildroot['id'])
koji_metadata = {
'metadata_version': metadata_version,
'build': build,
'buildroots': [buildroot],
'output': [output.metadata for output in output_files],
}
return koji_metadata, output_files
def upload_file(self, session, output, serverdir):
"""
Upload a file to koji
:return: str, pathname on server
"""
name = output.metadata['filename']
self.log.debug("uploading %r to %r as %r",
output.file.name, serverdir, name)
kwargs = {}
if self.blocksize is not None:
kwargs['blocksize'] = self.blocksize
self.log.debug("using blocksize %d", self.blocksize)
upload_logger = KojiUploadLogger(self.log)
session.uploadWrapper(output.file.name, serverdir, name=name,
callback=upload_logger.callback, **kwargs)
path = os.path.join(serverdir, name)
self.log.debug("uploaded %r", path)
return path
@staticmethod
def get_upload_server_dir():
"""
Create a path name for uploading files to
:return: str, path name expected to be unique
"""
dir_prefix = 'koji-promote'
random_chars = ''.join([random.choice(ascii_letters)
for _ in range(8)])
unique_fragment = '%r.%s' % (time.time(), random_chars)
return os.path.join(dir_prefix, unique_fragment)
def login(self):
"""
Log in to koji
:return: koji.ClientSession instance, logged in
"""
auth_info = {
"proxyuser": self.koji_proxy_user,
"ssl_certs_dir": self.koji_ssl_certs,
"krb_principal": self.koji_principal,
"krb_keytab": self.koji_keytab
}
return create_koji_session(self.kojihub, auth_info)
def run(self):
"""
Run the plugin.
"""
if ((self.koji_principal and not self.koji_keytab) or
(self.koji_keytab and not self.koji_principal)):
raise RuntimeError("specify both koji_principal and koji_keytab "
"or neither")
# Only run if the build was successful
if self.workflow.build_process_failed:
self.log.info("Not promoting failed build to koji")
return
koji_metadata, output_files = self.get_metadata()
try:
session = self.login()
server_dir = self.get_upload_server_dir()
for output in output_files:
if output.file:
self.upload_file(session, output, server_dir)
finally:
for output in output_files:
if output.file:
output.file.close()
try:
build_info = session.CGImport(koji_metadata, server_dir)
except Exception:
self.log.debug("metadata: %r", koji_metadata)
raise
# Older versions of CGImport do not return a value.
build_id = build_info.get("id") if build_info else None
self.log.debug("Build information: %s",
json.dumps(build_info, sort_keys=True, indent=4))
# Tag the build
if build_id is not None and self.target is not None:
self.log.debug("Finding build tag for target %s", self.target)
target_info = session.getBuildTarget(self.target)
build_tag = target_info['dest_tag_name']
self.log.info("Tagging build with %s", build_tag)
task_id = session.tagBuild(build_tag, build_id)
task = TaskWatcher(session, task_id,
poll_interval=self.poll_interval)
task.wait()
if task.failed():
raise RuntimeError("Task %s failed to tag koji build" % task_id)
return build_id
|
{
"content_hash": "91d8bcad9821bd977a2d15abb7273811",
"timestamp": "",
"source": "github",
"line_count": 678,
"max_line_length": 84,
"avg_line_length": 34.46755162241888,
"alnum_prop": 0.548675595874877,
"repo_name": "jpopelka/atomic-reactor",
"id": "4d8989e8dc0b706e4d9dcb4e8572eab6351355ee",
"size": "23369",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "atomic_reactor/plugins/exit_koji_promote.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "570871"
},
{
"name": "Shell",
"bytes": "3589"
}
],
"symlink_target": ""
}
|
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('promoted', '0011_create_switch_for_subscription'),
]
operations = [
migrations.RenameField(
model_name='promotedsubscription',
old_name='paid_at',
new_name='payment_completed_at',
),
]
|
{
"content_hash": "2f8b333c7893defb4b9aae58d079dd48",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 60,
"avg_line_length": 22.25,
"alnum_prop": 0.5898876404494382,
"repo_name": "mozilla/olympia",
"id": "b959c801442bfc6fe23c1400f984ae42c9d09b54",
"size": "406",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "src/olympia/promoted/migrations/0012_auto_20201022_0903.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ApacheConf",
"bytes": "249"
},
{
"name": "CSS",
"bytes": "663668"
},
{
"name": "HTML",
"bytes": "1600904"
},
{
"name": "JavaScript",
"bytes": "1314155"
},
{
"name": "Makefile",
"bytes": "4235"
},
{
"name": "PLSQL",
"bytes": "74"
},
{
"name": "Python",
"bytes": "3997396"
},
{
"name": "Shell",
"bytes": "9101"
},
{
"name": "Smarty",
"bytes": "1930"
}
],
"symlink_target": ""
}
|
from oslo_config import cfg
from oslo_log import versionutils
_DEFAULT_LOG_DATE_FORMAT = "%Y-%m-%d %H:%M:%S"
DEFAULT_LOG_LEVELS = ['amqp=WARN', 'amqplib=WARN', 'boto=WARN',
'qpid=WARN', 'sqlalchemy=WARN', 'suds=INFO',
'oslo.messaging=INFO', 'oslo_messaging=INFO',
'iso8601=WARN',
'requests.packages.urllib3.connectionpool=WARN',
'urllib3.connectionpool=WARN', 'websocket=WARN',
'requests.packages.urllib3.util.retry=WARN',
'urllib3.util.retry=WARN',
'keystonemiddleware=WARN', 'routes.middleware=WARN',
'stevedore=WARN', 'taskflow=WARN',
'keystoneauth=WARN', 'oslo.cache=INFO',
'oslo_policy=INFO',
'dogpile.core.dogpile=INFO']
_IGNORE_MESSAGE = "This option is ignored if log_config_append is set."
common_cli_opts = [
cfg.BoolOpt('debug',
short='d',
default=False,
mutable=True,
help='If set to true, the logging level will be set to '
'DEBUG instead of the default INFO level.'),
]
logging_cli_opts = [
cfg.StrOpt('log-config-append',
metavar='PATH',
deprecated_name='log-config',
mutable=True,
help='The name of a logging configuration file. This file '
'is appended to any existing logging configuration '
'files. For details about logging configuration files, '
'see the Python logging module documentation. Note that '
'when logging configuration files are used then all '
'logging configuration is set in the configuration file '
'and other logging configuration options are ignored '
'(for example, log-date-format).'),
cfg.StrOpt('log-date-format',
default=_DEFAULT_LOG_DATE_FORMAT,
metavar='DATE_FORMAT',
help='Defines the format string for %%(asctime)s in log '
'records. Default: %(default)s . '
+ _IGNORE_MESSAGE),
cfg.StrOpt('log-file',
metavar='PATH',
deprecated_name='logfile',
help='(Optional) Name of log file to send logging output to. '
'If no default is set, logging will go to stderr as '
'defined by use_stderr. '
+ _IGNORE_MESSAGE),
cfg.StrOpt('log-dir',
deprecated_name='logdir',
help='(Optional) The base directory used for relative log_file '
' paths. '
+ _IGNORE_MESSAGE),
cfg.BoolOpt('watch-log-file',
default=False,
                help='Uses a logging handler designed to watch the file '
                     'system. When the log file is moved or removed, this '
                     'handler will open a new log file at the specified '
                     'path instantaneously. This makes sense only when the '
                     'log_file option is specified and the platform is '
                     'Linux. '
+ _IGNORE_MESSAGE),
cfg.BoolOpt('use-syslog',
default=False,
help='Use syslog for logging. '
'Existing syslog format is DEPRECATED '
'and will be changed later to honor RFC5424. '
+ _IGNORE_MESSAGE),
cfg.BoolOpt('use-journal',
default=False,
help='Enable journald for logging. '
'If running in a systemd environment you may wish '
'to enable journal support. Doing so will use the '
'journal native protocol which includes structured '
'metadata in addition to log messages.'
+ _IGNORE_MESSAGE),
cfg.StrOpt('syslog-log-facility',
default='LOG_USER',
help='Syslog facility to receive log lines. '
+ _IGNORE_MESSAGE),
cfg.BoolOpt('use-json',
default=False,
help='Use JSON formatting for logging. '
+ _IGNORE_MESSAGE),
]
generic_log_opts = [
cfg.BoolOpt('use_stderr',
default=False,
help='Log output to standard error. '
+ _IGNORE_MESSAGE),
cfg.BoolOpt('use_eventlog',
default=False,
help='Log output to Windows Event Log.'),
cfg.IntOpt('log_rotate_interval',
default=1,
help='The amount of time before the log files are rotated. '
'This option is ignored unless log_rotation_type is set '
'to "interval".'),
cfg.StrOpt('log_rotate_interval_type',
choices=['Seconds', 'Minutes', 'Hours', 'Days', 'Weekday',
'Midnight'],
ignore_case=True,
default='days',
help='Rotation interval type. The time of the last file '
'change (or the time when the service was started) is '
'used when scheduling the next rotation.'),
cfg.IntOpt('max_logfile_count',
default=30,
help='Maximum number of rotated log files.'),
cfg.IntOpt('max_logfile_size_mb',
default=200,
help='Log file maximum size in MB. This option is ignored if '
'"log_rotation_type" is not set to "size".'),
cfg.StrOpt('log_rotation_type',
default='none',
choices=[('interval',
'Rotate logs at predefined time intervals.'),
('size',
'Rotate logs once they reach a predefined size.'),
('none', 'Do not rotate log files.')],
ignore_case=True,
help='Log rotation type.')
]
log_opts = [
cfg.StrOpt('logging_context_format_string',
default='%(asctime)s.%(msecs)03d %(process)d %(levelname)s '
'%(name)s [%(global_request_id)s %(request_id)s '
'%(user_identity)s] %(instance)s%(message)s',
help='Format string to use for log messages with context. '
'Used by oslo_log.formatters.ContextFormatter'),
cfg.StrOpt('logging_default_format_string',
default='%(asctime)s.%(msecs)03d %(process)d %(levelname)s '
'%(name)s [-] %(instance)s%(message)s',
help='Format string to use for log messages when context is '
'undefined. '
'Used by oslo_log.formatters.ContextFormatter'),
cfg.StrOpt('logging_debug_format_suffix',
default='%(funcName)s %(pathname)s:%(lineno)d',
help='Additional data to append to log message when logging '
'level for the message is DEBUG. '
'Used by oslo_log.formatters.ContextFormatter'),
cfg.StrOpt('logging_exception_prefix',
default='%(asctime)s.%(msecs)03d %(process)d ERROR %(name)s '
'%(instance)s',
help='Prefix each line of exception output with this format. '
'Used by oslo_log.formatters.ContextFormatter'),
cfg.StrOpt('logging_user_identity_format',
default='%(user)s %(project)s %(domain)s %(system_scope)s '
'%(user_domain)s %(project_domain)s',
help='Defines the format string for %(user_identity)s that '
'is used in logging_context_format_string. '
'Used by oslo_log.formatters.ContextFormatter'),
cfg.ListOpt('default_log_levels',
default=DEFAULT_LOG_LEVELS,
help='List of package logging levels in logger=LEVEL pairs. '
+ _IGNORE_MESSAGE),
cfg.BoolOpt('publish_errors',
default=False,
help='Enables or disables publication of error events.'),
# NOTE(mikal): there are two options here because sometimes we are handed
# a full instance (and could include more information), and other times we
# are just handed a UUID for the instance.
cfg.StrOpt('instance_format',
default='[instance: %(uuid)s] ',
help='The format for an instance that is passed with the log '
'message.'),
cfg.StrOpt('instance_uuid_format',
default='[instance: %(uuid)s] ',
help='The format for an instance UUID that is passed with the '
'log message.'),
cfg.IntOpt('rate_limit_interval',
default=0,
               help='Interval, in seconds, of log rate limiting.'),
cfg.IntOpt('rate_limit_burst',
default=0,
help='Maximum number of logged messages per '
'rate_limit_interval.'),
cfg.StrOpt('rate_limit_except_level',
default='CRITICAL',
help='Log level name used by rate limiting: CRITICAL, ERROR, '
                     'INFO, WARNING, DEBUG or empty string. Logs with a level '
                     'greater than or equal to rate_limit_except_level are not '
'filtered. An empty string means that all levels are '
'filtered.'),
]
def list_opts():
"""Returns a list of oslo.config options available in the library.
The returned list includes all oslo.config options which may be registered
at runtime by the library.
Each element of the list is a tuple. The first element is the name of the
group under which the list of elements in the second element will be
registered. A group name of None corresponds to the [DEFAULT] group in
config files.
The purpose of this is to allow tools like the Oslo sample config file
generator (oslo-config-generator) to discover the options exposed to users
by this library.
:returns: a list of (group_name, opts) tuples
"""
return [(None, (common_cli_opts + logging_cli_opts +
generic_log_opts + log_opts +
versionutils.deprecated_opts))]
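# Editor's sketch (not part of oslo.log): how a consumer such as the
# oslo-config-generator might walk the (group_name, opts) tuples returned
# by list_opts(); a group name of None maps to the [DEFAULT] section.
if __name__ == '__main__':
    for group_name, opts in list_opts():
        for opt in opts:
            print('[%s] %s' % (group_name or 'DEFAULT', opt.name))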
|
{
"content_hash": "0a69fa9f3eeb775e7804cf2deeafad5c",
"timestamp": "",
"source": "github",
"line_count": 219,
"max_line_length": 79,
"avg_line_length": 47.465753424657535,
"alnum_prop": 0.539009139009139,
"repo_name": "openstack/oslo.log",
"id": "b1917fb8ae0fd3994baaa1c4a49662e1852ee652",
"size": "10968",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "oslo_log/_options.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "218052"
}
],
"symlink_target": ""
}
|
"""For accessing cihai as a package."""
import os
import sys
def run():
base = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.insert(0, base)
from .process import Packager
p = Packager.from_cli(sys.argv[1:])
p.download()
p.export()
if __name__ == '__main__':
sys.exit(run())
|
{
"content_hash": "c93a073068de1773e773e8a9b2becc22",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 70,
"avg_line_length": 18.38888888888889,
"alnum_prop": 0.6042296072507553,
"repo_name": "cihai/cihaidata-unihan",
"id": "688cb941a3dbfc3540ff20f5e6b30d767acd3525",
"size": "377",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "unihan_etl/__main__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "807"
},
{
"name": "Python",
"bytes": "43894"
}
],
"symlink_target": ""
}
|
import fractions, time, unittest
from bibliopixel.control import midi
from argparse import Namespace
from test.bibliopixel import patch
C3 = Namespace(type='note_on', note=32, channel=1, velocity=96)
C3_ZERO = Namespace(type='note_on', note=32, channel=1, velocity=0)
C3_OFF = Namespace(type='note_off', note=32, channel=1, velocity=0, x=47)
BC = Namespace(type='control_change', channel=2, control=2, value=10)
BC3 = Namespace(type='control_change', channel=3, control=2, value=127)
MOD = Namespace(type='control_change', channel=2, control=1, value=127)
PB = Namespace(type='pitchwheel', channel=2, pitch=0x400)
OTHER = Namespace(type='other', channel=32, thing='stuff')
class FakeMido:
def __init__(self, msgs):
def multi_port(x, yield_ports=False):
if yield_ports:
return (('fake_port', i) for i in msgs)
else:
return msgs
self.ports = self
self.ports.MultiPort = multi_port
def get_input_names(self):
class Port(str):
def name(self):
return self
return [Port('one'), Port('two')]
def open_input(self, x):
return x
class MidiTest(unittest.TestCase):
routing = {
'note_on': '.note_on',
'note_off': '.note_off',
'control_change': {
'1': '.cc1',
'2': '.cc2',
},
'pitchwheel': '.pitch',
}
def run_test(self, msgs, expected, routing=None, **kwds):
with patch.patch(midi, 'mido', FakeMido(msgs)):
class Root:
pass
root = Root()
m = midi.Midi(routing=routing or self.routing, **kwds)
m.set_project(root)
with m.run_until_stop():
time.sleep(0.1)
self.assertEqual(vars(root), expected)
def test_one(self):
expected = {'note_on': (32, fractions.Fraction(96, 127))}
self.run_test([C3], expected)
def test_accept(self):
expected = {
'cc1': 1,
'cc2': 1,
'note_off': (32, 0),
'note_on': (32, fractions.Fraction(96, 127)),
'pitch': fractions.Fraction(-7, 8),
}
self.run_test([C3, C3_OFF, BC, BC3, MOD, PB, OTHER], expected)
def test_use_note_on(self):
expected_on = {'note_on': (32, 0)}
expected_off = {'note_off': (32, 0)}
self.run_test([C3_ZERO], expected_on, use_note_off=False)
self.run_test([C3_ZERO], expected_on, use_note_off=None)
self.run_test([C3_ZERO], expected_off, use_note_off=True)
self.run_test([C3_OFF], expected_on, use_note_off=False)
self.run_test([C3_OFF], expected_off, use_note_off=None)
self.run_test([C3_OFF], expected_off, use_note_off=True)
|
{
"content_hash": "649197605246512ecac83b1f6be4c8f9",
"timestamp": "",
"source": "github",
"line_count": 85,
"max_line_length": 73,
"avg_line_length": 32.71764705882353,
"alnum_prop": 0.565623876303488,
"repo_name": "rec/BiblioPixel",
"id": "d21e4c8840b33ba644c16a82e72f4dbecc58be0f",
"size": "2781",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "test/bibliopixel/control/midi_test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "20651"
},
{
"name": "HTML",
"bytes": "3310"
},
{
"name": "JavaScript",
"bytes": "5140"
},
{
"name": "Python",
"bytes": "673520"
},
{
"name": "Shell",
"bytes": "2973"
}
],
"symlink_target": ""
}
|
import time
import logging
from ..exceptions import (
ProtocolError,
ConnectTimeoutError,
ReadTimeoutError,
MaxRetryError,
)
from ..packages import six
log = logging.getLogger(__name__)
class Retry(object):
""" Retry configuration.
Each retry attempt will create a new Retry object with updated values, so
they can be safely reused.
Retries can be defined as a default for a pool::
retries = Retry(connect=5, read=2, redirect=5)
http = PoolManager(retries=retries)
response = http.request('GET', 'http://example.com/')
Or per-request (which overrides the default for the pool)::
response = http.request('GET', 'http://example.com/', retries=Retry(10))
Retries can be disabled by passing ``False``::
response = http.request('GET', 'http://example.com/', retries=False)
Errors will be wrapped in :class:`~urllib3.exceptions.MaxRetryError` unless
retries are disabled, in which case the causing exception will be raised.
:param int total:
Total number of retries to allow. Takes precedence over other counts.
Set to ``None`` to remove this constraint and fall back on other
counts. It's a good idea to set this to some sensibly-high value to
account for unexpected edge cases and avoid infinite retry loops.
Set to ``0`` to fail on the first retry.
Set to ``False`` to disable and imply ``raise_on_redirect=False``.
:param int connect:
How many connection-related errors to retry on.
These are errors raised before the request is sent to the remote server,
which we assume has not triggered the server to process the request.
Set to ``0`` to fail on the first retry of this type.
:param int read:
How many times to retry on read errors.
These errors are raised after the request was sent to the server, so the
request may have side-effects.
Set to ``0`` to fail on the first retry of this type.
:param int redirect:
How many redirects to perform. Limit this to avoid infinite redirect
loops.
        A redirect is an HTTP response with a status code of 301, 302, 303,
        307 or 308.
Set to ``0`` to fail on the first retry of this type.
Set to ``False`` to disable and imply ``raise_on_redirect=False``.
:param iterable method_whitelist:
Set of uppercased HTTP method verbs that we should retry on.
By default, we only retry on methods which are considered to be
        idempotent (multiple requests with the same parameters end with the
same state). See :attr:`Retry.DEFAULT_METHOD_WHITELIST`.
:param iterable status_forcelist:
A set of HTTP status codes that we should force a retry on.
By default, this is disabled with ``None``.
:param float backoff_factor:
A backoff factor to apply between attempts. urllib3 will sleep for::
{backoff factor} * (2 ^ ({number of total retries} - 1))
seconds. If the backoff_factor is 0.1, then :func:`.sleep` will sleep
for [0.1s, 0.2s, 0.4s, ...] between retries. It will never be longer
than :attr:`Retry.MAX_BACKOFF`.
By default, backoff is disabled (set to 0).
:param bool raise_on_redirect: Whether, if the number of redirects is
exhausted, to raise a MaxRetryError, or to return a response with a
response code in the 3xx range.
"""
DEFAULT_METHOD_WHITELIST = frozenset([
'HEAD', 'GET', 'PUT', 'DELETE', 'OPTIONS', 'TRACE'])
#: Maximum backoff time.
BACKOFF_MAX = 120
def __init__(self, total=10, connect=None, read=None, redirect=None,
method_whitelist=DEFAULT_METHOD_WHITELIST, status_forcelist=None,
backoff_factor=0, raise_on_redirect=True, _observed_errors=0):
self.total = total
self.connect = connect
self.read = read
if redirect is False or total is False:
redirect = 0
raise_on_redirect = False
self.redirect = redirect
self.status_forcelist = status_forcelist or set()
self.method_whitelist = method_whitelist
self.backoff_factor = backoff_factor
self.raise_on_redirect = raise_on_redirect
self._observed_errors = _observed_errors # TODO: use .history instead?
def new(self, **kw):
params = dict(
total=self.total,
connect=self.connect, read=self.read, redirect=self.redirect,
method_whitelist=self.method_whitelist,
status_forcelist=self.status_forcelist,
backoff_factor=self.backoff_factor,
raise_on_redirect=self.raise_on_redirect,
_observed_errors=self._observed_errors,
)
params.update(kw)
return type(self)(**params)
@classmethod
def from_int(cls, retries, redirect=True, default=None):
""" Backwards-compatibility for the old retries format."""
if retries is None:
retries = default if default is not None else cls.DEFAULT
if isinstance(retries, Retry):
return retries
redirect = bool(redirect) and None
new_retries = cls(retries, redirect=redirect)
log.debug("Converted retries value: %r -> %r" % (retries, new_retries))
return new_retries
def get_backoff_time(self):
""" Formula for computing the current backoff
:rtype: float
"""
if self._observed_errors <= 1:
return 0
backoff_value = self.backoff_factor * (2 ** (self._observed_errors - 1))
return min(self.BACKOFF_MAX, backoff_value)
def sleep(self):
""" Sleep between retry attempts using an exponential backoff.
By default, the backoff factor is 0 and this method will return
immediately.
"""
backoff = self.get_backoff_time()
if backoff <= 0:
return
time.sleep(backoff)
def _is_connection_error(self, err):
""" Errors when we're fairly sure that the server did not receive the
request, so it should be safe to retry.
"""
return isinstance(err, ConnectTimeoutError)
def _is_read_error(self, err):
""" Errors that occur after the request has been started, so we can't
assume that the server did not process any of it.
"""
return isinstance(err, (ReadTimeoutError, ProtocolError))
def is_forced_retry(self, method, status_code):
""" Is this method/response retryable? (Based on method/codes whitelists)
"""
if self.method_whitelist and method.upper() not in self.method_whitelist:
return False
return self.status_forcelist and status_code in self.status_forcelist
def is_exhausted(self):
""" Are we out of retries?
"""
retry_counts = (self.total, self.connect, self.read, self.redirect)
retry_counts = list(filter(None, retry_counts))
if not retry_counts:
return False
return min(retry_counts) < 0
def increment(self, method=None, url=None, response=None, error=None, _pool=None, _stacktrace=None):
""" Return a new Retry object with incremented retry counters.
:param response: A response object, or None, if the server did not
return a response.
:type response: :class:`~urllib3.response.HTTPResponse`
:param Exception error: An error encountered during the request, or
None if the response was received successfully.
:return: A new ``Retry`` object.
"""
if self.total is False and error:
# Disabled, indicate to re-raise the error.
raise six.reraise(type(error), error, _stacktrace)
total = self.total
if total is not None:
total -= 1
_observed_errors = self._observed_errors
connect = self.connect
read = self.read
redirect = self.redirect
if error and self._is_connection_error(error):
# Connect retry?
if connect is False:
raise six.reraise(type(error), error, _stacktrace)
elif connect is not None:
connect -= 1
_observed_errors += 1
elif error and self._is_read_error(error):
# Read retry?
if read is False:
raise six.reraise(type(error), error, _stacktrace)
elif read is not None:
read -= 1
_observed_errors += 1
elif response and response.get_redirect_location():
# Redirect retry?
if redirect is not None:
redirect -= 1
else:
# FIXME: Nothing changed, scenario doesn't make sense.
_observed_errors += 1
new_retry = self.new(
total=total,
connect=connect, read=read, redirect=redirect,
_observed_errors=_observed_errors)
if new_retry.is_exhausted():
raise MaxRetryError(_pool, url, error)
log.debug("Incremented Retry for (url='%s'): %r" % (url, new_retry))
return new_retry
def __repr__(self):
return ('{cls.__name__}(total={self.total}, connect={self.connect}, '
'read={self.read}, redirect={self.redirect})').format(
cls=type(self), self=self)
# For backwards compatibility (equivalent to pre-v1.9):
Retry.DEFAULT = Retry(3)
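# Editor's sketch (not part of urllib3): illustrates the documented backoff
# formula {backoff factor} * (2 ** ({number of observed errors} - 1)),
# capped at Retry.BACKOFF_MAX. With backoff_factor=0.1 the successive
# sleeps printed below are 0, 0.2, 0.4 and 0.8 seconds.
if __name__ == '__main__':
    retry = Retry(total=5, backoff_factor=0.1)
    for _ in range(4):
        retry = retry.new(_observed_errors=retry._observed_errors + 1)
        print(retry.get_backoff_time())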
|
{
"content_hash": "a259c0c550ea38926bcd075f0e11f139",
"timestamp": "",
"source": "github",
"line_count": 279,
"max_line_length": 104,
"avg_line_length": 35.22222222222222,
"alnum_prop": 0.6005902106441436,
"repo_name": "ThinkboxSoftware/Deadline",
"id": "130217bfdb357839411ca548038691ef2a86f1f4",
"size": "9827",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Custom/events/Zabbix/API/requests/packages/urllib3/util/retry.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "698"
},
{
"name": "Dockerfile",
"bytes": "3978"
},
{
"name": "Python",
"bytes": "1011982"
},
{
"name": "Ruby",
"bytes": "6570"
},
{
"name": "Shell",
"bytes": "7174"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
class Chat(object):
def __init__(self, app, id, name, adapter, private=False):
self.app = app
self.id = id
self.name = name
self.private = private
self.adapter = adapter
self.storage = app.get_storage('communication:{}:chats:{}'.format(
self.adapter.name, self.id))
def __unicode__(self):
return '{} ({}, private={})'.format(
self.name, self.id, self.private)
def send_message(self, text):
self.adapter.send_message(self, text)
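# Editor's sketch (not part of the original module): a minimal fake app and
# adapter showing the interface Chat relies on -- the adapter needs a `name`
# attribute and a `send_message(chat, text)` method, and the app is assumed
# to expose `get_storage(key)`.
class _EchoAdapter(object):
    name = 'echo'
    def send_message(self, chat, text):
        print('[{}] {}'.format(chat.name, text))
class _FakeApp(object):
    def get_storage(self, key):
        return {}
# Chat(_FakeApp(), 1, 'general', _EchoAdapter()).send_message('hi')
# -> prints "[general] hi"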
|
{
"content_hash": "5f18c6fd29d87d026c828a31b25746c4",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 74,
"avg_line_length": 28.65,
"alnum_prop": 0.5759162303664922,
"repo_name": "KiraLT/KiraBot",
"id": "9e7beae7f4cfbf36c8a62e73d3a336bec77f3803",
"size": "573",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/kirabot/kirabot/communication/chat.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "24766"
},
{
"name": "Shell",
"bytes": "2037"
}
],
"symlink_target": ""
}
|
"""
Extending on demo-03, implements an event callback we can use to process the
incoming data.
"""
import sys
import time
from ant.core import driver
from ant.core import node
from ant.core import event
from ant.core import message
from ant.core.constants import *
from config import *
NETKEY = '\xB9\xA5\x21\xFB\xBD\x72\xC3\x45'
# A run-the-mill event listener
class HRMListener(event.EventCallback):
def process(self, msg):
if isinstance(msg, message.ChannelBroadcastDataMessage):
            print('Heart Rate: %d' % ord(msg.payload[-1]))
# Initialize
stick = driver.USB2Driver(SERIAL, log=LOG, debug=DEBUG)
antnode = node.Node(stick)
antnode.start()
# Setup channel
key = node.NetworkKey('N:ANT+', NETKEY)
antnode.setNetworkKey(0, key)
channel = antnode.getFreeChannel()
channel.name = 'C:HRM'
channel.assign('N:ANT+', CHANNEL_TYPE_TWOWAY_RECEIVE)
channel.setID(120, 0, 0)
channel.setSearchTimeout(TIMEOUT_NEVER)
channel.setPeriod(8070)
channel.setFrequency(57)
channel.open()
# Setup callback
# Note: We could also register an event listener for non-channel events by
# calling registerEventListener() on antnode rather than channel.
channel.registerCallback(HRMListener())
# Wait
print "Listening for HR monitor events (120 seconds)..."
time.sleep(120)
# Shutdown
channel.close()
channel.unassign()
antnode.stop()
|
{
"content_hash": "8c44605a087f11b23106b862e8c2c174",
"timestamp": "",
"source": "github",
"line_count": 55,
"max_line_length": 76,
"avg_line_length": 24.345454545454544,
"alnum_prop": 0.7520537714712472,
"repo_name": "tomwardill/python-ant",
"id": "815dd7bbaa65cc8fcc517cae722b912b4be107aa",
"size": "1339",
"binary": false,
"copies": "4",
"ref": "refs/heads/develop",
"path": "demos/ant.core/04-processevents.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "99512"
}
],
"symlink_target": ""
}
|
"""Gaussian processes regression."""
# Authors: Jan Hendrik Metzen <jhm@informatik.uni-bremen.de>
# Modified by: Pete Green <p.l.green@liverpool.ac.uk>
# License: BSD 3 clause
import warnings
from operator import itemgetter
import numpy as np
from scipy.linalg import cholesky, cho_solve, solve_triangular
import scipy.optimize
from ..base import BaseEstimator, RegressorMixin, clone
from ..base import MultiOutputMixin
from .kernels import RBF, ConstantKernel as C
from ..utils import check_random_state
from ..utils.optimize import _check_optimize_result
from ..utils.validation import _deprecate_positional_args
class GaussianProcessRegressor(MultiOutputMixin,
RegressorMixin, BaseEstimator):
"""Gaussian process regression (GPR).
The implementation is based on Algorithm 2.1 of Gaussian Processes
for Machine Learning (GPML) by Rasmussen and Williams.
In addition to standard scikit-learn estimator API,
GaussianProcessRegressor:
* allows prediction without prior fitting (based on the GP prior)
* provides an additional method sample_y(X), which evaluates samples
drawn from the GPR (prior or posterior) at given inputs
* exposes a method log_marginal_likelihood(theta), which can be used
externally for other ways of selecting hyperparameters, e.g., via
Markov chain Monte Carlo.
Read more in the :ref:`User Guide <gaussian_process>`.
.. versionadded:: 0.18
Parameters
----------
kernel : kernel instance, default=None
The kernel specifying the covariance function of the GP. If None is
passed, the kernel ``ConstantKernel(1.0, constant_value_bounds="fixed"
* RBF(1.0, length_scale_bounds="fixed")`` is used as default. Note that
the kernel hyperparameters are optimized during fitting unless the
bounds are marked as "fixed".
alpha : float or ndarray of shape (n_samples,), default=1e-10
Value added to the diagonal of the kernel matrix during fitting.
This can prevent a potential numerical issue during fitting, by
ensuring that the calculated values form a positive definite matrix.
It can also be interpreted as the variance of additional Gaussian
measurement noise on the training observations. Note that this is
different from using a `WhiteKernel`. If an array is passed, it must
have the same number of entries as the data used for fitting and is
        used as a datapoint-dependent noise level. Allowing the noise level
        to be specified directly as a parameter is mainly for convenience and
        for consistency with Ridge.
optimizer : "fmin_l_bfgs_b" or callable, default="fmin_l_bfgs_b"
Can either be one of the internally supported optimizers for optimizing
the kernel's parameters, specified by a string, or an externally
defined optimizer passed as a callable. If a callable is passed, it
must have the signature::
def optimizer(obj_func, initial_theta, bounds):
# * 'obj_func' is the objective function to be minimized, which
# takes the hyperparameters theta as parameter and an
# optional flag eval_gradient, which determines if the
# gradient is returned additionally to the function value
# * 'initial_theta': the initial value for theta, which can be
# used by local optimizers
# * 'bounds': the bounds on the values of theta
....
# Returned are the best found hyperparameters theta and
# the corresponding value of the target function.
return theta_opt, func_min
        By default, the 'L-BFGS-B' algorithm from scipy.optimize.minimize
is used. If None is passed, the kernel's parameters are kept fixed.
Available internal optimizers are::
'fmin_l_bfgs_b'
n_restarts_optimizer : int, default=0
The number of restarts of the optimizer for finding the kernel's
parameters which maximize the log-marginal likelihood. The first run
of the optimizer is performed from the kernel's initial parameters,
the remaining ones (if any) from thetas sampled log-uniform randomly
from the space of allowed theta-values. If greater than 0, all bounds
must be finite. Note that n_restarts_optimizer == 0 implies that one
run is performed.
normalize_y : bool, default=False
        Whether to normalize the target values y, i.e. set the mean and
        variance of the target values to 0 and 1 respectively. This is
recommended for cases where zero-mean, unit-variance priors are used.
Note that, in this implementation, the normalisation is reversed
before the GP predictions are reported.
.. versionchanged:: 0.23
copy_X_train : bool, default=True
If True, a persistent copy of the training data is stored in the
object. Otherwise, just a reference to the training data is stored,
which might cause predictions to change if the data is modified
externally.
random_state : int, RandomState instance or None, default=None
        Determines random number generation used to initialize the
        optimizer restarts.
Pass an int for reproducible results across multiple function calls.
        See :term:`Glossary <random_state>`.
Attributes
----------
X_train_ : array-like of shape (n_samples, n_features) or list of object
Feature vectors or other representations of training data (also
required for prediction).
y_train_ : array-like of shape (n_samples,) or (n_samples, n_targets)
Target values in training data (also required for prediction)
kernel_ : kernel instance
The kernel used for prediction. The structure of the kernel is the
same as the one passed as parameter but with optimized hyperparameters
L_ : array-like of shape (n_samples, n_samples)
Lower-triangular Cholesky decomposition of the kernel in ``X_train_``
alpha_ : array-like of shape (n_samples,)
Dual coefficients of training data points in kernel space
log_marginal_likelihood_value_ : float
The log-marginal-likelihood of ``self.kernel_.theta``
Examples
--------
>>> from sklearn.datasets import make_friedman2
>>> from sklearn.gaussian_process import GaussianProcessRegressor
>>> from sklearn.gaussian_process.kernels import DotProduct, WhiteKernel
>>> X, y = make_friedman2(n_samples=500, noise=0, random_state=0)
>>> kernel = DotProduct() + WhiteKernel()
>>> gpr = GaussianProcessRegressor(kernel=kernel,
... random_state=0).fit(X, y)
>>> gpr.score(X, y)
0.3680...
>>> gpr.predict(X[:2,:], return_std=True)
(array([653.0..., 592.1...]), array([316.6..., 316.6...]))
"""
@_deprecate_positional_args
def __init__(self, kernel=None, *, alpha=1e-10,
optimizer="fmin_l_bfgs_b", n_restarts_optimizer=0,
normalize_y=False, copy_X_train=True, random_state=None):
self.kernel = kernel
self.alpha = alpha
self.optimizer = optimizer
self.n_restarts_optimizer = n_restarts_optimizer
self.normalize_y = normalize_y
self.copy_X_train = copy_X_train
self.random_state = random_state
def fit(self, X, y):
"""Fit Gaussian process regression model.
Parameters
----------
X : array-like of shape (n_samples, n_features) or list of object
Feature vectors or other representations of training data.
y : array-like of shape (n_samples,) or (n_samples, n_targets)
Target values
Returns
-------
self : returns an instance of self.
"""
if self.kernel is None: # Use an RBF kernel as default
self.kernel_ = C(1.0, constant_value_bounds="fixed") \
* RBF(1.0, length_scale_bounds="fixed")
else:
self.kernel_ = clone(self.kernel)
self._rng = check_random_state(self.random_state)
if self.kernel_.requires_vector_input:
X, y = self._validate_data(X, y, multi_output=True, y_numeric=True,
ensure_2d=True, dtype="numeric")
else:
X, y = self._validate_data(X, y, multi_output=True, y_numeric=True,
ensure_2d=False, dtype=None)
# Normalize target value
if self.normalize_y:
self._y_train_mean = np.mean(y, axis=0)
self._y_train_std = np.std(y, axis=0)
# Remove mean and make unit variance
y = (y - self._y_train_mean) / self._y_train_std
else:
self._y_train_mean = np.zeros(1)
self._y_train_std = 1
if np.iterable(self.alpha) \
and self.alpha.shape[0] != y.shape[0]:
if self.alpha.shape[0] == 1:
self.alpha = self.alpha[0]
else:
raise ValueError("alpha must be a scalar or an array"
" with same number of entries as y.(%d != %d)"
% (self.alpha.shape[0], y.shape[0]))
self.X_train_ = np.copy(X) if self.copy_X_train else X
self.y_train_ = np.copy(y) if self.copy_X_train else y
if self.optimizer is not None and self.kernel_.n_dims > 0:
# Choose hyperparameters based on maximizing the log-marginal
# likelihood (potentially starting from several initial values)
def obj_func(theta, eval_gradient=True):
if eval_gradient:
lml, grad = self.log_marginal_likelihood(
theta, eval_gradient=True, clone_kernel=False)
return -lml, -grad
else:
return -self.log_marginal_likelihood(theta,
clone_kernel=False)
# First optimize starting from theta specified in kernel
optima = [(self._constrained_optimization(obj_func,
self.kernel_.theta,
self.kernel_.bounds))]
# Additional runs are performed from log-uniform chosen initial
# theta
if self.n_restarts_optimizer > 0:
if not np.isfinite(self.kernel_.bounds).all():
raise ValueError(
"Multiple optimizer restarts (n_restarts_optimizer>0) "
"requires that all bounds are finite.")
bounds = self.kernel_.bounds
for iteration in range(self.n_restarts_optimizer):
theta_initial = \
self._rng.uniform(bounds[:, 0], bounds[:, 1])
optima.append(
self._constrained_optimization(obj_func, theta_initial,
bounds))
# Select result from run with minimal (negative) log-marginal
# likelihood
lml_values = list(map(itemgetter(1), optima))
self.kernel_.theta = optima[np.argmin(lml_values)][0]
self.kernel_._check_bounds_params()
self.log_marginal_likelihood_value_ = -np.min(lml_values)
else:
self.log_marginal_likelihood_value_ = \
self.log_marginal_likelihood(self.kernel_.theta,
clone_kernel=False)
# Precompute quantities required for predictions which are independent
# of actual query points
K = self.kernel_(self.X_train_)
K[np.diag_indices_from(K)] += self.alpha
try:
self.L_ = cholesky(K, lower=True) # Line 2
# self.L_ changed, self._K_inv needs to be recomputed
self._K_inv = None
except np.linalg.LinAlgError as exc:
exc.args = ("The kernel, %s, is not returning a "
"positive definite matrix. Try gradually "
"increasing the 'alpha' parameter of your "
"GaussianProcessRegressor estimator."
% self.kernel_,) + exc.args
raise
self.alpha_ = cho_solve((self.L_, True), self.y_train_) # Line 3
return self
def predict(self, X, return_std=False, return_cov=False):
"""Predict using the Gaussian process regression model
We can also predict based on an unfitted model by using the GP prior.
        In addition to the mean of the predictive distribution, its
        standard deviation (return_std=True) or covariance (return_cov=True)
        can also be returned.
Note that at most one of the two can be requested.
Parameters
----------
X : array-like of shape (n_samples, n_features) or list of object
Query points where the GP is evaluated.
return_std : bool, default=False
If True, the standard-deviation of the predictive distribution at
the query points is returned along with the mean.
return_cov : bool, default=False
If True, the covariance of the joint predictive distribution at
the query points is returned along with the mean.
Returns
-------
y_mean : ndarray of shape (n_samples, [n_output_dims])
            Mean of predictive distribution at query points.
y_std : ndarray of shape (n_samples,), optional
Standard deviation of predictive distribution at query points.
Only returned when `return_std` is True.
y_cov : ndarray of shape (n_samples, n_samples), optional
            Covariance of joint predictive distribution at query points.
Only returned when `return_cov` is True.
"""
if return_std and return_cov:
raise RuntimeError(
"Not returning standard deviation of predictions when "
"returning full covariance.")
if self.kernel is None or self.kernel.requires_vector_input:
X = self._validate_data(X, ensure_2d=True, dtype="numeric",
reset=False)
else:
X = self._validate_data(X, ensure_2d=False, dtype=None,
reset=False)
if not hasattr(self, "X_train_"): # Unfitted;predict based on GP prior
if self.kernel is None:
kernel = (C(1.0, constant_value_bounds="fixed") *
RBF(1.0, length_scale_bounds="fixed"))
else:
kernel = self.kernel
y_mean = np.zeros(X.shape[0])
if return_cov:
y_cov = kernel(X)
return y_mean, y_cov
elif return_std:
y_var = kernel.diag(X)
return y_mean, np.sqrt(y_var)
else:
return y_mean
else: # Predict based on GP posterior
K_trans = self.kernel_(X, self.X_train_)
y_mean = K_trans.dot(self.alpha_) # Line 4 (y_mean = f_star)
# undo normalisation
y_mean = self._y_train_std * y_mean + self._y_train_mean
if return_cov:
v = cho_solve((self.L_, True), K_trans.T) # Line 5
y_cov = self.kernel_(X) - K_trans.dot(v) # Line 6
# undo normalisation
y_cov = y_cov * self._y_train_std**2
return y_mean, y_cov
elif return_std:
# cache result of K_inv computation
if self._K_inv is None:
# compute inverse K_inv of K based on its Cholesky
# decomposition L and its inverse L_inv
L_inv = solve_triangular(self.L_.T,
np.eye(self.L_.shape[0]))
self._K_inv = L_inv.dot(L_inv.T)
# Compute variance of predictive distribution
y_var = self.kernel_.diag(X)
y_var -= np.einsum("ij,ij->i",
np.dot(K_trans, self._K_inv), K_trans)
# Check if any of the variances is negative because of
# numerical issues. If yes: set the variance to 0.
y_var_negative = y_var < 0
if np.any(y_var_negative):
warnings.warn("Predicted variances smaller than 0. "
"Setting those variances to 0.")
y_var[y_var_negative] = 0.0
# undo normalisation
y_var = y_var * self._y_train_std**2
return y_mean, np.sqrt(y_var)
else:
return y_mean
def sample_y(self, X, n_samples=1, random_state=0):
"""Draw samples from Gaussian process and evaluate at X.
Parameters
----------
X : array-like of shape (n_samples, n_features) or list of object
Query points where the GP is evaluated.
n_samples : int, default=1
The number of samples drawn from the Gaussian process
random_state : int, RandomState instance or None, default=0
Determines random number generation to randomly draw samples.
Pass an int for reproducible results across multiple function
calls.
            See :term:`Glossary <random_state>`.
Returns
-------
y_samples : ndarray of shape (n_samples_X, [n_output_dims], n_samples)
Values of n_samples samples drawn from Gaussian process and
evaluated at query points.
"""
rng = check_random_state(random_state)
y_mean, y_cov = self.predict(X, return_cov=True)
if y_mean.ndim == 1:
y_samples = rng.multivariate_normal(y_mean, y_cov, n_samples).T
else:
y_samples = \
[rng.multivariate_normal(y_mean[:, i], y_cov,
n_samples).T[:, np.newaxis]
for i in range(y_mean.shape[1])]
y_samples = np.hstack(y_samples)
return y_samples
def log_marginal_likelihood(self, theta=None, eval_gradient=False,
clone_kernel=True):
"""Returns log-marginal likelihood of theta for training data.
Parameters
----------
theta : array-like of shape (n_kernel_params,) default=None
Kernel hyperparameters for which the log-marginal likelihood is
evaluated. If None, the precomputed log_marginal_likelihood
of ``self.kernel_.theta`` is returned.
eval_gradient : bool, default=False
If True, the gradient of the log-marginal likelihood with respect
to the kernel hyperparameters at position theta is returned
additionally. If True, theta must not be None.
clone_kernel : bool, default=True
            If True, the kernel attribute is copied. If False, the kernel
            attribute is modified in place, which may result in a performance
            improvement.
Returns
-------
log_likelihood : float
Log-marginal likelihood of theta for training data.
log_likelihood_gradient : ndarray of shape (n_kernel_params,), optional
Gradient of the log-marginal likelihood with respect to the kernel
hyperparameters at position theta.
Only returned when eval_gradient is True.
"""
if theta is None:
if eval_gradient:
raise ValueError(
"Gradient can only be evaluated for theta!=None")
return self.log_marginal_likelihood_value_
if clone_kernel:
kernel = self.kernel_.clone_with_theta(theta)
else:
kernel = self.kernel_
kernel.theta = theta
if eval_gradient:
K, K_gradient = kernel(self.X_train_, eval_gradient=True)
else:
K = kernel(self.X_train_)
K[np.diag_indices_from(K)] += self.alpha
try:
L = cholesky(K, lower=True) # Line 2
except np.linalg.LinAlgError:
return (-np.inf, np.zeros_like(theta)) \
if eval_gradient else -np.inf
# Support multi-dimensional output of self.y_train_
y_train = self.y_train_
if y_train.ndim == 1:
y_train = y_train[:, np.newaxis]
alpha = cho_solve((L, True), y_train) # Line 3
# Compute log-likelihood (compare line 7)
log_likelihood_dims = -0.5 * np.einsum("ik,ik->k", y_train, alpha)
log_likelihood_dims -= np.log(np.diag(L)).sum()
log_likelihood_dims -= K.shape[0] / 2 * np.log(2 * np.pi)
log_likelihood = log_likelihood_dims.sum(-1) # sum over dimensions
if eval_gradient: # compare Equation 5.9 from GPML
tmp = np.einsum("ik,jk->ijk", alpha, alpha) # k: output-dimension
tmp -= cho_solve((L, True), np.eye(K.shape[0]))[:, :, np.newaxis]
# Compute "0.5 * trace(tmp.dot(K_gradient))" without
# constructing the full matrix tmp.dot(K_gradient) since only
# its diagonal is required
log_likelihood_gradient_dims = \
0.5 * np.einsum("ijl,jik->kl", tmp, K_gradient)
log_likelihood_gradient = log_likelihood_gradient_dims.sum(-1)
if eval_gradient:
return log_likelihood, log_likelihood_gradient
else:
return log_likelihood
def _constrained_optimization(self, obj_func, initial_theta, bounds):
if self.optimizer == "fmin_l_bfgs_b":
opt_res = scipy.optimize.minimize(
obj_func, initial_theta, method="L-BFGS-B", jac=True,
bounds=bounds)
_check_optimize_result("lbfgs", opt_res)
theta_opt, func_min = opt_res.x, opt_res.fun
elif callable(self.optimizer):
theta_opt, func_min = \
self.optimizer(obj_func, initial_theta, bounds=bounds)
else:
raise ValueError("Unknown optimizer %s." % self.optimizer)
return theta_opt, func_min
def _more_tags(self):
return {'requires_fit': False}
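# Editor's sketch (not part of scikit-learn): a custom optimizer with the
# callable signature documented for the `optimizer` parameter above,
# delegating to scipy.optimize.minimize (already imported in this module).
def _example_optimizer(obj_func, initial_theta, bounds):
    res = scipy.optimize.minimize(
        obj_func, initial_theta, method="L-BFGS-B", jac=True, bounds=bounds)
    return res.x, res.fun
# Usage sketch: GaussianProcessRegressor(optimizer=_example_optimizer)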
|
{
"content_hash": "a937bf4684f0dea74cb51b10f1bbed64",
"timestamp": "",
"source": "github",
"line_count": 521,
"max_line_length": 79,
"avg_line_length": 43.232245681381954,
"alnum_prop": 0.5831113478955781,
"repo_name": "ogrisel/scikit-learn",
"id": "b4ab0441efc7135a598d5f720d0162d3f8997a76",
"size": "22524",
"binary": false,
"copies": "3",
"ref": "refs/heads/main",
"path": "sklearn/gaussian_process/_gpr.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "2232"
},
{
"name": "C",
"bytes": "41025"
},
{
"name": "C++",
"bytes": "146835"
},
{
"name": "Makefile",
"bytes": "1713"
},
{
"name": "Python",
"bytes": "9787260"
},
{
"name": "Shell",
"bytes": "43738"
}
],
"symlink_target": ""
}
|
import unittest
from conans.test.tools import TestClient
from conans.test.utils.cpp_test_files import cpp_hello_conan_files
from conans.paths import CONANFILE
import textwrap
class InfoTest(unittest.TestCase):
def _create(self, number, version, deps=None, deps_dev=None, export=True):
files = cpp_hello_conan_files(number, version, deps, build=False)
files[CONANFILE] = files[CONANFILE].replace("config(", "configure(")
if deps_dev:
files[CONANFILE] = files[CONANFILE].replace("exports = '*'", """exports = '*'
dev_requires=%s
""" % ",".join('"%s"' % d for d in deps_dev))
self.client.save(files, clean_first=True)
if export:
self.client.run("export lasote/stable")
expected_output = textwrap.dedent(
"""\
WARN: Conanfile doesn't have 'url'.
It is recommended to add your repo URL as attribute
WARN: Conanfile doesn't have a 'license'.
It is recommended to add the package license as attribute""")
self.assertIn(expected_output, self.client.user_io.out)
if number != "Hello2":
files[CONANFILE] = files[CONANFILE].replace('version = "0.1"',
'version = "0.1"\n'
' url= "myurl"\n'
' license = "MIT"')
else:
files[CONANFILE] = files[CONANFILE].replace('version = "0.1"',
'version = "0.1"\n'
' url= "myurl"\n'
' license = "MIT", "GPL"')
self.client.save(files)
if export:
self.client.run("export lasote/stable")
self.assertNotIn("WARN: Conanfile doesn't have 'url'", self.client.user_io.out)
def reuse_test(self):
self.client = TestClient()
self._create("Hello0", "0.1")
self._create("Hello1", "0.1", ["Hello0/0.1@lasote/stable"])
self._create("Hello2", "0.1", ["Hello1/0.1@lasote/stable"], export=False)
self.client.run("info -u")
expected_output = textwrap.dedent(
"""\
Hello2/0.1@PROJECT
URL: myurl
Licenses: MIT, GPL
Requires:
Hello1/0.1@lasote/stable
Hello0/0.1@lasote/stable
Remote: None
URL: myurl
License: MIT
Updates: You have the latest version (None)
Required by:
Hello1/0.1@lasote/stable
Hello1/0.1@lasote/stable
Remote: None
URL: myurl
License: MIT
Updates: You have the latest version (None)
Required by:
Hello2/0.1@PROJECT
Requires:
Hello0/0.1@lasote/stable""")
self.assertIn(expected_output, self.client.user_io.out)
self.client.run("info -u --only=url")
expected_output = textwrap.dedent(
"""\
Hello2/0.1@PROJECT
URL: myurl
Hello0/0.1@lasote/stable
URL: myurl
Hello1/0.1@lasote/stable
URL: myurl""")
self.assertIn(expected_output, self.client.user_io.out)
self.client.run("info -u --only=url,license")
expected_output = textwrap.dedent(
"""\
Hello2/0.1@PROJECT
URL: myurl
Licenses: MIT, GPL
Hello0/0.1@lasote/stable
URL: myurl
License: MIT
Hello1/0.1@lasote/stable
URL: myurl
License: MIT""")
self.assertIn(expected_output, self.client.user_io.out)
def build_order_test(self):
self.client = TestClient()
self._create("Hello0", "0.1")
self._create("Hello1", "0.1", ["Hello0/0.1@lasote/stable"])
self._create("Hello2", "0.1", ["Hello1/0.1@lasote/stable"], export=False)
self.client.run("info -bo=Hello0/0.1@lasote/stable")
self.assertIn("[Hello0/0.1@lasote/stable], [Hello1/0.1@lasote/stable]",
self.client.user_io.out)
self.client.run("info -bo=Hello1/0.1@lasote/stable")
self.assertIn("[Hello1/0.1@lasote/stable]", self.client.user_io.out)
self.client.run("info -bo=Hello1/0.1@lasote/stable -bo=Hello0/0.1@lasote/stable")
self.assertIn("[Hello0/0.1@lasote/stable], [Hello1/0.1@lasote/stable]",
self.client.user_io.out)
self.client.run("info Hello1/0.1@lasote/stable -bo=Hello0/0.1@lasote/stable")
self.assertEqual("[Hello0/0.1@lasote/stable], [Hello1/0.1@lasote/stable]\n",
self.client.user_io.out)
def diamond_build_order_test(self):
self.client = TestClient()
self._create("LibA", "0.1")
self._create("Dev1", "0.1")
self._create("LibE", "0.1", deps_dev=["Dev1/0.1@lasote/stable"])
self._create("LibF", "0.1")
self._create("LibG", "0.1")
self._create("Dev2", "0.1", deps=["LibG/0.1@lasote/stable"])
self._create("LibB", "0.1", ["LibA/0.1@lasote/stable", "LibE/0.1@lasote/stable"])
self._create("LibC", "0.1", ["LibA/0.1@lasote/stable", "LibF/0.1@lasote/stable"],
deps_dev=["Dev2/0.1@lasote/stable"])
self._create("LibD", "0.1", ["LibB/0.1@lasote/stable", "LibC/0.1@lasote/stable"],
export=False)
self.client.run("info -bo=LibA/0.1@lasote/stable")
self.assertIn("[LibA/0.1@lasote/stable], "
"[LibB/0.1@lasote/stable, LibC/0.1@lasote/stable]",
self.client.user_io.out)
self.client.run("info -bo=LibB/0.1@lasote/stable")
self.assertIn("[LibB/0.1@lasote/stable]", self.client.user_io.out)
self.client.run("info -bo=LibE/0.1@lasote/stable")
self.assertIn("[LibE/0.1@lasote/stable], [LibB/0.1@lasote/stable]",
self.client.user_io.out)
self.client.run("info -bo=LibF/0.1@lasote/stable")
self.assertIn("[LibF/0.1@lasote/stable], [LibC/0.1@lasote/stable]",
self.client.user_io.out)
self.client.run("info -bo=Dev1/0.1@lasote/stable")
self.assertEqual("\n", self.client.user_io.out)
self.client.run("info --scope=LibE:dev=True -bo=Dev1/0.1@lasote/stable")
self.assertIn("[Dev1/0.1@lasote/stable], [LibE/0.1@lasote/stable], "
"[LibB/0.1@lasote/stable]", self.client.user_io.out)
self.client.run("info -bo=LibG/0.1@lasote/stable")
self.assertEqual("\n", self.client.user_io.out)
self.client.run("info --scope=LibC:dev=True -bo=LibG/0.1@lasote/stable")
self.assertIn("[LibG/0.1@lasote/stable], [Dev2/0.1@lasote/stable], "
"[LibC/0.1@lasote/stable]", self.client.user_io.out)
self.client.run("info --build_order=ALL")
self.assertIn("[LibA/0.1@lasote/stable, LibE/0.1@lasote/stable, LibF/0.1@lasote/stable], "
"[LibB/0.1@lasote/stable, LibC/0.1@lasote/stable]",
self.client.user_io.out)
self.client.run("info --build_order=ALL --scope=ALL:dev=True")
self.assertIn("[Dev1/0.1@lasote/stable, LibG/0.1@lasote/stable], "
"[Dev2/0.1@lasote/stable, LibA/0.1@lasote/stable, LibE/0.1@lasote/stable, "
"LibF/0.1@lasote/stable], [LibB/0.1@lasote/stable, LibC/0.1@lasote/stable]",
self.client.user_io.out)
|
{
"content_hash": "7f1667f69aa6feddd0dba5aee723ae47",
"timestamp": "",
"source": "github",
"line_count": 170,
"max_line_length": 98,
"avg_line_length": 45.95882352941177,
"alnum_prop": 0.5297580954818891,
"repo_name": "dragly/conan",
"id": "ec3152645815c07caeec1aa9b2fb2cdc12219d4c",
"size": "7813",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "conans/test/command/info_test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1383611"
},
{
"name": "Shell",
"bytes": "1132"
}
],
"symlink_target": ""
}
|
import os
import sys
os.environ['DJANGO_SETTINGS_MODULE'] = 'tests.devsettings'
if __name__ == '__main__':
"""
Test Django App in travis.ci
"""
import django
from django.core.management import execute_from_command_line
execute_from_command_line([sys.argv[0], "makemigrations", "category", ])
execute_from_command_line([sys.argv[0], "migrate"])
if hasattr(django, 'setup'):
django.setup()
from django.test.runner import DiscoverRunner
failures = DiscoverRunner().run_tests(("category",), verbosity=2)
if failures:
sys.exit(failures)
|
{
"content_hash": "7c1a96efe2b6b7b9205c18a700c4e825",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 76,
"avg_line_length": 29.75,
"alnum_prop": 0.6588235294117647,
"repo_name": "aneumeier/category",
"id": "5f92bfabc1f3b2432e2422bfba90ddf465cca10a",
"size": "619",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "categorytest.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "11734"
},
{
"name": "Python",
"bytes": "41155"
}
],
"symlink_target": ""
}
|
import sys
import os
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..'))
#
import cocos
from cocos.director import director
from cocos.actions import *
from cocos.layer import *
import pyglet
class BackgroundLayer( cocos.layer.Layer ):
def __init__(self):
super( BackgroundLayer, self ).__init__()
self.img = pyglet.resource.image('background_image.png')
def draw( self ):
self.img.blit(0,0)
if __name__ == "__main__":
director.init( resizable=True )
main_scene = cocos.scene.Scene()
main_scene.add( BackgroundLayer(), z=0 )
flip = FlipX3D( duration=2)
main_scene.do( flip + ReverseTime(flip) )
    director.run(main_scene)
|
{
"content_hash": "d27bbc3a001bf118e2b6a6ec136051b5",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 65,
"avg_line_length": 24.06896551724138,
"alnum_prop": 0.6504297994269341,
"repo_name": "adamwiggins/cocos2d",
"id": "3434d639f0bde6b8dc56d14d31fe17a4bb5232ef",
"size": "771",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "test/test_reverse_time.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "825818"
},
{
"name": "Shell",
"bytes": "3018"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np
import os
import unittest
from caffe2.proto import caffe2_pb2
from caffe2.python import core, test_util, workspace, model_helper, brew
import caffe2.python.hypothesis_test_util as htu
import hypothesis.strategies as st
from hypothesis import given
class TestWorkspace(unittest.TestCase):
def setUp(self):
self.net = core.Net("test-net")
self.testblob_ref = self.net.ConstantFill(
[], "testblob", shape=[1, 2, 3, 4], value=1.0)
workspace.ResetWorkspace()
def testRootFolder(self):
self.assertEqual(workspace.ResetWorkspace(), True)
self.assertEqual(workspace.RootFolder(), ".")
self.assertEqual(
workspace.ResetWorkspace("/tmp/caffe-workspace-test"), True)
self.assertEqual(workspace.RootFolder(), "/tmp/caffe-workspace-test")
def testWorkspaceHasBlobWithNonexistingName(self):
self.assertEqual(workspace.HasBlob("non-existing"), False)
def testRunOperatorOnce(self):
self.assertEqual(
workspace.RunOperatorOnce(
self.net.Proto().op[0].SerializeToString()
), True
)
self.assertEqual(workspace.HasBlob("testblob"), True)
blobs = workspace.Blobs()
self.assertEqual(len(blobs), 1)
self.assertEqual(blobs[0], "testblob")
def testRunNetOnce(self):
self.assertEqual(
workspace.RunNetOnce(self.net.Proto().SerializeToString()), True)
self.assertEqual(workspace.HasBlob("testblob"), True)
def testCurrentWorkspaceWrapper(self):
self.assertNotIn("testblob", workspace.C.Workspace.current.blobs)
self.assertEqual(
workspace.RunNetOnce(self.net.Proto().SerializeToString()), True)
self.assertEqual(workspace.HasBlob("testblob"), True)
self.assertIn("testblob", workspace.C.Workspace.current.blobs)
workspace.ResetWorkspace()
self.assertNotIn("testblob", workspace.C.Workspace.current.blobs)
def testRunPlan(self):
plan = core.Plan("test-plan")
plan.AddStep(core.ExecutionStep("test-step", self.net))
self.assertEqual(
workspace.RunPlan(plan.Proto().SerializeToString()), True)
self.assertEqual(workspace.HasBlob("testblob"), True)
def testConstructPlanFromSteps(self):
step = core.ExecutionStep("test-step-as-plan", self.net)
self.assertEqual(workspace.RunPlan(step), True)
self.assertEqual(workspace.HasBlob("testblob"), True)
def testResetWorkspace(self):
self.assertEqual(
workspace.RunNetOnce(self.net.Proto().SerializeToString()), True)
self.assertEqual(workspace.HasBlob("testblob"), True)
self.assertEqual(workspace.ResetWorkspace(), True)
self.assertEqual(workspace.HasBlob("testblob"), False)
def testTensorAccess(self):
ws = workspace.C.Workspace()
""" test in-place modification """
ws.create_blob("tensor").feed(np.array([1.1, 1.2, 1.3]))
tensor = ws.blobs["tensor"].tensor()
tensor.data[0] = 3.3
val = np.array([3.3, 1.2, 1.3])
np.testing.assert_array_equal(tensor.data, val)
np.testing.assert_array_equal(ws.blobs["tensor"].fetch(), val)
""" test in-place initialization """
tensor.init([2, 3], core.DataType.INT32)
tensor.data[1, 1] = 100
val = np.zeros([2, 3], dtype=np.int32)
val[1, 1] = 100
np.testing.assert_array_equal(tensor.data, val)
np.testing.assert_array_equal(ws.blobs["tensor"].fetch(), val)
""" strings cannot be initialized from python """
with self.assertRaises(RuntimeError):
tensor.init([3, 4], core.DataType.STRING)
""" feed (copy) data into tensor """
val = np.array([[b'abc', b'def'], [b'ghi', b'jkl']], dtype=np.object)
tensor.feed(val)
self.assertEquals(tensor.data[0, 0], b'abc')
np.testing.assert_array_equal(ws.blobs["tensor"].fetch(), val)
val = np.array([1.1, 10.2])
tensor.feed(val)
val[0] = 5.2
self.assertEquals(tensor.data[0], 1.1)
""" fetch (copy) data from tensor """
val = np.array([1.1, 1.2])
tensor.feed(val)
val2 = tensor.fetch()
tensor.data[0] = 5.2
val3 = tensor.fetch()
np.testing.assert_array_equal(val, val2)
self.assertEquals(val3[0], 5.2)
def testFetchFeedBlob(self):
self.assertEqual(
workspace.RunNetOnce(self.net.Proto().SerializeToString()), True)
fetched = workspace.FetchBlob("testblob")
# check if fetched is correct.
self.assertEqual(fetched.shape, (1, 2, 3, 4))
np.testing.assert_array_equal(fetched, 1.0)
fetched[:] = 2.0
self.assertEqual(workspace.FeedBlob("testblob", fetched), True)
fetched_again = workspace.FetchBlob("testblob")
self.assertEqual(fetched_again.shape, (1, 2, 3, 4))
np.testing.assert_array_equal(fetched_again, 2.0)
def testFetchFeedBlobViaBlobReference(self):
self.assertEqual(
workspace.RunNetOnce(self.net.Proto().SerializeToString()), True)
fetched = workspace.FetchBlob(self.testblob_ref)
# check if fetched is correct.
self.assertEqual(fetched.shape, (1, 2, 3, 4))
np.testing.assert_array_equal(fetched, 1.0)
fetched[:] = 2.0
self.assertEqual(workspace.FeedBlob(self.testblob_ref, fetched), True)
fetched_again = workspace.FetchBlob("testblob") # fetch by name now
self.assertEqual(fetched_again.shape, (1, 2, 3, 4))
np.testing.assert_array_equal(fetched_again, 2.0)
def testFetchFeedBlobTypes(self):
for dtype in [np.float16, np.float32, np.float64, np.bool,
np.int8, np.int16, np.int32, np.int64,
np.uint8, np.uint16]:
try:
rng = np.iinfo(dtype).max * 2
except ValueError:
rng = 1000
data = ((np.random.rand(2, 3, 4) - 0.5) * rng).astype(dtype)
self.assertEqual(workspace.FeedBlob("testblob_types", data), True)
fetched_back = workspace.FetchBlob("testblob_types")
self.assertEqual(fetched_back.shape, (2, 3, 4))
self.assertEqual(fetched_back.dtype, dtype)
np.testing.assert_array_equal(fetched_back, data)
def testFetchFeedBlobBool(self):
"""Special case for bool to ensure coverage of both true and false."""
data = np.zeros((2, 3, 4)).astype(np.bool)
data.flat[::2] = True
self.assertEqual(workspace.FeedBlob("testblob_types", data), True)
fetched_back = workspace.FetchBlob("testblob_types")
self.assertEqual(fetched_back.shape, (2, 3, 4))
self.assertEqual(fetched_back.dtype, np.bool)
np.testing.assert_array_equal(fetched_back, data)
def testFetchFeedBlobZeroDim(self):
data = np.empty(shape=(2, 0, 3), dtype=np.float32)
self.assertEqual(workspace.FeedBlob("testblob_empty", data), True)
fetched_back = workspace.FetchBlob("testblob_empty")
self.assertEqual(fetched_back.shape, (2, 0, 3))
self.assertEqual(fetched_back.dtype, np.float32)
def testFetchFeedLongStringTensor(self):
# long strings trigger array of object creation
strs = np.array([
b' '.join(10 * [b'long string']),
b' '.join(128 * [b'very long string']),
b'small \0\1\2 string',
b"Hello, world! I have special \0 symbols \1!"])
workspace.FeedBlob('my_str_tensor', strs)
strs2 = workspace.FetchBlob('my_str_tensor')
self.assertEqual(strs.shape, strs2.shape)
for i in range(0, strs.shape[0]):
self.assertEqual(strs[i], strs2[i])
def testFetchFeedShortStringTensor(self):
# small strings trigger NPY_STRING array
strs = np.array([b'elem1', b'elem 2', b'element 3'])
workspace.FeedBlob('my_str_tensor_2', strs)
strs2 = workspace.FetchBlob('my_str_tensor_2')
self.assertEqual(strs.shape, strs2.shape)
for i in range(0, strs.shape[0]):
self.assertEqual(strs[i], strs2[i])
def testFetchFeedPlainString(self):
# this is actual string, not a tensor of strings
s = b"Hello, world! I have special \0 symbols \1!"
workspace.FeedBlob('my_plain_string', s)
s2 = workspace.FetchBlob('my_plain_string')
self.assertEqual(s, s2)
def testFetchBlobs(self):
s1 = b"test1"
s2 = b"test2"
workspace.FeedBlob('s1', s1)
workspace.FeedBlob('s2', s2)
fetch1, fetch2 = workspace.FetchBlobs(['s1', 's2'])
self.assertEquals(s1, fetch1)
self.assertEquals(s2, fetch2)
def testFetchFeedViaBlobDict(self):
self.assertEqual(
workspace.RunNetOnce(self.net.Proto().SerializeToString()), True)
fetched = workspace.blobs["testblob"]
# check if fetched is correct.
self.assertEqual(fetched.shape, (1, 2, 3, 4))
np.testing.assert_array_equal(fetched, 1.0)
fetched[:] = 2.0
workspace.blobs["testblob"] = fetched
fetched_again = workspace.blobs["testblob"]
self.assertEqual(fetched_again.shape, (1, 2, 3, 4))
np.testing.assert_array_equal(fetched_again, 2.0)
self.assertTrue("testblob" in workspace.blobs)
self.assertFalse("non_existant" in workspace.blobs)
self.assertEqual(len(workspace.blobs), 1)
for key in workspace.blobs:
self.assertEqual(key, "testblob")
class TestMultiWorkspaces(unittest.TestCase):
def setUp(self):
workspace.SwitchWorkspace("default")
workspace.ResetWorkspace()
def testCreateWorkspace(self):
self.net = core.Net("test-net")
self.net.ConstantFill([], "testblob", shape=[1, 2, 3, 4], value=1.0)
self.assertEqual(
workspace.RunNetOnce(self.net.Proto().SerializeToString()), True
)
self.assertEqual(workspace.HasBlob("testblob"), True)
self.assertEqual(workspace.SwitchWorkspace("test", True), None)
self.assertEqual(workspace.HasBlob("testblob"), False)
self.assertEqual(workspace.SwitchWorkspace("default"), None)
self.assertEqual(workspace.HasBlob("testblob"), True)
        # Switching to a non-existing workspace should raise a RuntimeError.
        with self.assertRaises(RuntimeError):
            workspace.SwitchWorkspace("non-existing")
workspaces = workspace.Workspaces()
self.assertTrue("default" in workspaces)
self.assertTrue("test" in workspaces)
@unittest.skipIf(not workspace.has_gpu_support, "No gpu support.")
class TestWorkspaceGPU(test_util.TestCase):
def setUp(self):
workspace.ResetWorkspace()
self.net = core.Net("test-net")
self.net.ConstantFill([], "testblob", shape=[1, 2, 3, 4], value=1.0)
self.net.RunAllOnGPU()
def testFetchBlobGPU(self):
self.assertEqual(
workspace.RunNetOnce(self.net.Proto().SerializeToString()), True)
fetched = workspace.FetchBlob("testblob")
# check if fetched is correct.
self.assertEqual(fetched.shape, (1, 2, 3, 4))
np.testing.assert_array_equal(fetched, 1.0)
fetched[:] = 2.0
self.assertEqual(workspace.FeedBlob("testblob", fetched), True)
fetched_again = workspace.FetchBlob("testblob")
self.assertEqual(fetched_again.shape, (1, 2, 3, 4))
np.testing.assert_array_equal(fetched_again, 2.0)
def testDefaultGPUID(self):
self.assertEqual(workspace.SetDefaultGPUID(0), None)
self.assertEqual(workspace.GetDefaultGPUID(), 0)
def testGetCudaPeerAccessPattern(self):
pattern = workspace.GetCudaPeerAccessPattern()
self.assertEqual(type(pattern), np.ndarray)
self.assertEqual(pattern.ndim, 2)
self.assertEqual(pattern.shape[0], pattern.shape[1])
self.assertEqual(pattern.shape[0], workspace.NumCudaDevices())
@unittest.skipIf(not workspace.C.has_mkldnn, "No MKLDNN support.")
class TestWorkspaceMKLDNN(test_util.TestCase):
def testFeedFetchBlobMKLDNN(self):
arr = np.random.randn(2, 3).astype(np.float32)
workspace.FeedBlob(
"testblob_mkldnn", arr, core.DeviceOption(caffe2_pb2.MKLDNN))
fetched = workspace.FetchBlob("testblob_mkldnn")
np.testing.assert_array_equal(arr, fetched)
class TestImmediate(test_util.TestCase):
def testImmediateEnterExit(self):
workspace.StartImmediate(i_know=True)
self.assertTrue(workspace.IsImmediate())
workspace.StopImmediate()
self.assertFalse(workspace.IsImmediate())
def testImmediateRunsCorrectly(self):
workspace.StartImmediate(i_know=True)
net = core.Net("test-net")
net.ConstantFill([], "testblob", shape=[1, 2, 3, 4], value=1.0)
self.assertEqual(
workspace.ImmediateBlobs(), ["testblob"])
content = workspace.FetchImmediate("testblob")
# Also, the immediate mode should not invade the original namespace,
# so we check if this is so.
with self.assertRaises(RuntimeError):
workspace.FetchBlob("testblob")
np.testing.assert_array_equal(content, 1.0)
content[:] = 2.0
self.assertTrue(workspace.FeedImmediate("testblob", content))
np.testing.assert_array_equal(
workspace.FetchImmediate("testblob"), 2.0)
workspace.StopImmediate()
with self.assertRaises(RuntimeError):
content = workspace.FetchImmediate("testblob")
def testImmediateRootFolder(self):
workspace.StartImmediate(i_know=True)
# for testing we will look into the _immediate_root_folder variable
# but in normal usage you should not access that.
self.assertTrue(len(workspace._immediate_root_folder) > 0)
root_folder = workspace._immediate_root_folder
self.assertTrue(os.path.isdir(root_folder))
workspace.StopImmediate()
self.assertTrue(len(workspace._immediate_root_folder) == 0)
# After termination, immediate mode should have the root folder
# deleted.
self.assertFalse(os.path.exists(root_folder))
class TestCppEnforceAsException(test_util.TestCase):
def testEnforce(self):
op = core.CreateOperator("Relu", ["X"], ["Y"])
with self.assertRaises(RuntimeError):
workspace.RunOperatorOnce(op)
class TestCWorkspace(htu.HypothesisTestCase):
def test_net_execution(self):
ws = workspace.C.Workspace()
self.assertEqual(ws.nets, {})
self.assertEqual(ws.blobs, {})
net = core.Net("test-net")
net.ConstantFill([], "testblob", shape=[1, 2, 3, 4], value=1.0)
ws.create_net(net)
# If we do not specify overwrite, this should raise an error.
with self.assertRaises(RuntimeError):
ws.create_net(net)
# But, if we specify overwrite, this should pass.
ws.create_net(net, True)
# Overwrite can also be a kwarg.
ws.create_net(net, overwrite=True)
self.assertIn("testblob", ws.blobs)
self.assertEqual(len(ws.nets), 1)
net_name = net.Proto().name
self.assertIn("test-net", net_name)
net = ws.nets[net_name].run()
blob = ws.blobs["testblob"]
np.testing.assert_array_equal(
np.ones((1, 2, 3, 4), dtype=np.float32),
blob.fetch())
@given(name=st.text(), value=st.floats(min_value=-1, max_value=1.0))
def test_operator_run(self, name, value):
ws = workspace.C.Workspace()
op = core.CreateOperator(
"ConstantFill", [], [name], shape=[1], value=value)
ws.run(op)
self.assertIn(name, ws.blobs)
np.testing.assert_allclose(
[value], ws.blobs[name].fetch(), atol=1e-4, rtol=1e-4)
@given(blob_name=st.text(),
net_name=st.text(),
value=st.floats(min_value=-1, max_value=1.0))
def test_net_run(self, blob_name, net_name, value):
ws = workspace.C.Workspace()
net = core.Net(net_name)
net.ConstantFill([], [blob_name], shape=[1], value=value)
ws.run(net)
self.assertIn(blob_name, ws.blobs)
self.assertNotIn(net_name, ws.nets)
np.testing.assert_allclose(
[value], ws.blobs[blob_name].fetch(), atol=1e-4, rtol=1e-4)
@given(blob_name=st.text(),
net_name=st.text(),
plan_name=st.text(),
value=st.floats(min_value=-1, max_value=1.0))
def test_plan_run(self, blob_name, plan_name, net_name, value):
ws = workspace.C.Workspace()
plan = core.Plan(plan_name)
net = core.Net(net_name)
net.ConstantFill([], [blob_name], shape=[1], value=value)
plan.AddStep(core.ExecutionStep("step", nets=[net], num_iter=1))
ws.run(plan)
self.assertIn(blob_name, ws.blobs)
self.assertIn(net.Name(), ws.nets)
np.testing.assert_allclose(
[value], ws.blobs[blob_name].fetch(), atol=1e-4, rtol=1e-4)
@given(blob_name=st.text(),
net_name=st.text(),
value=st.floats(min_value=-1, max_value=1.0))
def test_net_create(self, blob_name, net_name, value):
ws = workspace.C.Workspace()
net = core.Net(net_name)
net.ConstantFill([], [blob_name], shape=[1], value=value)
ws.create_net(net).run()
self.assertIn(blob_name, ws.blobs)
self.assertIn(net.Name(), ws.nets)
np.testing.assert_allclose(
[value], ws.blobs[blob_name].fetch(), atol=1e-4, rtol=1e-4)
@given(name=st.text(),
value=htu.tensor(),
device_option=st.sampled_from(htu.device_options))
def test_array_serde(self, name, value, device_option):
ws = workspace.C.Workspace()
ws.create_blob(name).feed(value, device_option=device_option)
self.assertIn(name, ws.blobs)
blob = ws.blobs[name]
np.testing.assert_equal(value, ws.blobs[name].fetch())
serde_blob = ws.create_blob("{}_serde".format(name))
serde_blob.deserialize(blob.serialize(name))
np.testing.assert_equal(value, serde_blob.fetch())
@given(name=st.text(), value=st.text())
def test_string_serde(self, name, value):
value = value.encode('ascii', 'ignore')
ws = workspace.C.Workspace()
ws.create_blob(name).feed(value)
self.assertIn(name, ws.blobs)
blob = ws.blobs[name]
self.assertEqual(value, ws.blobs[name].fetch())
serde_blob = ws.create_blob("{}_serde".format(name))
serde_blob.deserialize(blob.serialize(name))
self.assertEqual(value, serde_blob.fetch())
def test_exception(self):
ws = workspace.C.Workspace()
with self.assertRaises(TypeError):
ws.create_net("...")
class TestPredictor(unittest.TestCase):
def _create_model(self):
m = model_helper.ModelHelper()
y = brew.fc(m, "data", "y",
dim_in=4, dim_out=2,
weight_init=('ConstantFill', dict(value=1.0)),
bias_init=('ConstantFill', dict(value=0.0)),
axis=0)
m.net.AddExternalOutput(y)
return m
    # Use this test with a bigger model to see how using Predictor helps
    # avoid issues with the low protobuf size limit in Python.
    #
# def test_predictor_predefined(self):
# workspace.ResetWorkspace()
# path = 'caffe2/caffe2/test/assets/'
# with open(path + 'squeeze_predict_net.pb') as f:
# self.predict_net = f.read()
# with open(path + 'squeeze_init_net.pb') as f:
# self.init_net = f.read()
# self.predictor = workspace.Predictor(self.init_net, self.predict_net)
# inputs = [np.zeros((1, 3, 256, 256), dtype='f')]
# outputs = self.predictor.run(inputs)
# self.assertEqual(len(outputs), 1)
# self.assertEqual(outputs[0].shape, (1, 1000, 1, 1))
# self.assertAlmostEqual(outputs[0][0][0][0][0], 5.19026289e-05)
def test_predictor_memory_model(self):
workspace.ResetWorkspace()
m = self._create_model()
workspace.FeedBlob("data", np.zeros([4], dtype='float32'))
self.predictor = workspace.Predictor(
workspace.StringifyProto(m.param_init_net.Proto()),
workspace.StringifyProto(m.net.Proto()))
inputs = np.array([1, 3, 256, 256], dtype='float32')
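        # With all weights fixed at 1.0 and the bias at 0.0, each of the two
        # outputs is just the input sum: 1 + 3 + 256 + 256 = 516.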
outputs = self.predictor.run([inputs])
np.testing.assert_array_almost_equal(
np.array([[516, 516]], dtype='float32'), outputs)
if __name__ == '__main__':
unittest.main()
|
{
"content_hash": "3971ae954d9d7fa495e1b4c6091c67c8",
"timestamp": "",
"source": "github",
"line_count": 516,
"max_line_length": 79,
"avg_line_length": 40.76937984496124,
"alnum_prop": 0.6216190521462186,
"repo_name": "bwasti/caffe2",
"id": "e9b8a10a7f9d4c9de09cc6248d633fe8581b446e",
"size": "21037",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "caffe2/python/workspace_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "4514"
},
{
"name": "C",
"bytes": "58731"
},
{
"name": "C++",
"bytes": "2743591"
},
{
"name": "CMake",
"bytes": "131386"
},
{
"name": "CSS",
"bytes": "2196"
},
{
"name": "Cuda",
"bytes": "455661"
},
{
"name": "HTML",
"bytes": "5203"
},
{
"name": "Jupyter Notebook",
"bytes": "4615340"
},
{
"name": "Makefile",
"bytes": "527"
},
{
"name": "Metal",
"bytes": "29686"
},
{
"name": "Objective-C",
"bytes": "828"
},
{
"name": "Objective-C++",
"bytes": "147470"
},
{
"name": "Python",
"bytes": "2137478"
},
{
"name": "Shell",
"bytes": "20688"
}
],
"symlink_target": ""
}
|
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: avi_controllerproperties
author: Gaurav Rastogi (@grastogi23) <grastogi@avinetworks.com>
short_description: Module for setup of ControllerProperties Avi RESTful Object
description:
- This module is used to configure ControllerProperties object
- more examples at U(https://github.com/avinetworks/devops)
requirements: [ avisdk ]
version_added: "2.4"
options:
state:
description:
- The state that should be applied on the entity.
default: present
choices: ["absent", "present"]
avi_api_update_method:
description:
- Default method for object update is HTTP PUT.
- Setting to patch will override that behavior to use HTTP PATCH.
version_added: "2.5"
default: put
choices: ["put", "patch"]
avi_api_patch_op:
description:
- Patch operation to use when using avi_api_update_method as patch.
version_added: "2.5"
choices: ["add", "replace", "delete"]
allow_ip_forwarding:
description:
- Field introduced in 17.1.1.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
type: bool
allow_unauthenticated_apis:
description:
- Allow unauthenticated access for special apis.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
type: bool
allow_unauthenticated_nodes:
description:
- Boolean flag to set allow_unauthenticated_nodes.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
type: bool
api_idle_timeout:
description:
- Allowed values are 0-1440.
- Default value when not specified in API or module is interpreted by Avi Controller as 15.
api_perf_logging_threshold:
description:
- Threshold to log request timing in portal_performance.log and server-timing response header.
- Any stage taking longer than 1% of the threshold will be included in the server-timing header.
- Field introduced in 18.1.4, 18.2.1.
- Default value when not specified in API or module is interpreted by Avi Controller as 10000.
version_added: "2.9"
appviewx_compat_mode:
description:
- Export configuration in appviewx compatibility mode.
- Field introduced in 17.1.1.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
type: bool
attach_ip_retry_interval:
description:
- Number of attach_ip_retry_interval.
- Default value when not specified in API or module is interpreted by Avi Controller as 360.
attach_ip_retry_limit:
description:
- Number of attach_ip_retry_limit.
- Default value when not specified in API or module is interpreted by Avi Controller as 4.
bm_use_ansible:
description:
- Use ansible for se creation in baremetal.
- Field introduced in 17.2.2.
- Default value when not specified in API or module is interpreted by Avi Controller as True.
version_added: "2.5"
type: bool
cleanup_expired_authtoken_timeout_period:
description:
- Period for auth token cleanup job.
- Field introduced in 18.1.1.
- Default value when not specified in API or module is interpreted by Avi Controller as 60.
version_added: "2.9"
cleanup_sessions_timeout_period:
description:
- Period for sessions cleanup job.
- Field introduced in 18.1.1.
- Default value when not specified in API or module is interpreted by Avi Controller as 60.
version_added: "2.9"
cloud_reconcile:
description:
- Enable/disable periodic reconcile for all the clouds.
- Field introduced in 17.2.14,18.1.5,18.2.1.
- Default value when not specified in API or module is interpreted by Avi Controller as True.
version_added: "2.9"
type: bool
cluster_ip_gratuitous_arp_period:
description:
- Period for cluster ip gratuitous arp job.
- Default value when not specified in API or module is interpreted by Avi Controller as 60.
consistency_check_timeout_period:
description:
- Period for consistency check job.
- Field introduced in 18.1.1.
- Default value when not specified in API or module is interpreted by Avi Controller as 60.
version_added: "2.9"
crashed_se_reboot:
description:
- Number of crashed_se_reboot.
- Default value when not specified in API or module is interpreted by Avi Controller as 900.
dead_se_detection_timer:
description:
- Number of dead_se_detection_timer.
- Default value when not specified in API or module is interpreted by Avi Controller as 360.
dns_refresh_period:
description:
- Period for refresh pool and gslb dns job.
- Default value when not specified in API or module is interpreted by Avi Controller as 60.
dummy:
description:
- Number of dummy.
enable_api_sharding:
description:
- This setting enables the controller leader to shard api requests to the followers (if any).
- Field introduced in 18.1.5, 18.2.1.
- Default value when not specified in API or module is interpreted by Avi Controller as True.
version_added: "2.9"
type: bool
enable_memory_balancer:
description:
- Enable/disable memory balancer.
- Field introduced in 17.2.8.
- Default value when not specified in API or module is interpreted by Avi Controller as True.
version_added: "2.6"
type: bool
fatal_error_lease_time:
description:
- Number of fatal_error_lease_time.
- Default value when not specified in API or module is interpreted by Avi Controller as 120.
max_dead_se_in_grp:
description:
- Number of max_dead_se_in_grp.
- Default value when not specified in API or module is interpreted by Avi Controller as 1.
max_pcap_per_tenant:
description:
- Maximum number of pcap files stored per tenant.
- Default value when not specified in API or module is interpreted by Avi Controller as 4.
max_seq_attach_ip_failures:
description:
- Maximum number of consecutive attach ip failures that halts vs placement.
- Field introduced in 17.2.2.
- Default value when not specified in API or module is interpreted by Avi Controller as 3.
version_added: "2.5"
max_seq_vnic_failures:
description:
- Number of max_seq_vnic_failures.
- Default value when not specified in API or module is interpreted by Avi Controller as 3.
persistence_key_rotate_period:
description:
- Period for rotate app persistence keys job.
- Allowed values are 1-1051200.
- Special values are 0 - 'disabled'.
- Default value when not specified in API or module is interpreted by Avi Controller as 0.
portal_token:
description:
- Token used for uploading tech-support to portal.
- Field introduced in 16.4.6,17.1.2.
version_added: "2.4"
process_locked_useraccounts_timeout_period:
description:
- Period for process locked user accounts job.
- Field introduced in 18.1.1.
- Default value when not specified in API or module is interpreted by Avi Controller as 1.
version_added: "2.9"
process_pki_profile_timeout_period:
description:
- Period for process pki profile job.
- Field introduced in 18.1.1.
- Default value when not specified in API or module is interpreted by Avi Controller as 1440.
version_added: "2.9"
query_host_fail:
description:
- Number of query_host_fail.
- Default value when not specified in API or module is interpreted by Avi Controller as 180.
safenet_hsm_version:
description:
- Version of the safenet package installed on the controller.
- Field introduced in 16.5.2,17.2.3.
version_added: "2.5"
se_create_timeout:
description:
- Number of se_create_timeout.
- Default value when not specified in API or module is interpreted by Avi Controller as 900.
se_failover_attempt_interval:
description:
- Interval between attempting failovers to an se.
- Default value when not specified in API or module is interpreted by Avi Controller as 300.
se_from_marketplace:
description:
- This setting decides whether se is to be deployed from the cloud marketplace or to be created by the controller.
- The setting is applicable only when byol license is selected.
- Enum options - MARKETPLACE, IMAGE.
- Field introduced in 18.1.4, 18.2.1.
- Default value when not specified in API or module is interpreted by Avi Controller as IMAGE.
version_added: "2.9"
se_offline_del:
description:
- Number of se_offline_del.
- Default value when not specified in API or module is interpreted by Avi Controller as 172000.
se_vnic_cooldown:
description:
- Number of se_vnic_cooldown.
- Default value when not specified in API or module is interpreted by Avi Controller as 120.
secure_channel_cleanup_timeout:
description:
- Period for secure channel cleanup job.
- Default value when not specified in API or module is interpreted by Avi Controller as 60.
secure_channel_controller_token_timeout:
description:
- Number of secure_channel_controller_token_timeout.
- Default value when not specified in API or module is interpreted by Avi Controller as 60.
secure_channel_se_token_timeout:
description:
- Number of secure_channel_se_token_timeout.
- Default value when not specified in API or module is interpreted by Avi Controller as 60.
seupgrade_fabric_pool_size:
description:
- Pool size used for all fabric commands during se upgrade.
- Default value when not specified in API or module is interpreted by Avi Controller as 20.
seupgrade_segroup_min_dead_timeout:
description:
- Time to wait before marking segroup upgrade as stuck.
- Default value when not specified in API or module is interpreted by Avi Controller as 360.
ssl_certificate_expiry_warning_days:
description:
- Number of days for ssl certificate expiry warning.
unresponsive_se_reboot:
description:
- Number of unresponsive_se_reboot.
- Default value when not specified in API or module is interpreted by Avi Controller as 300.
upgrade_dns_ttl:
description:
- Time to account for dns ttl during upgrade.
- This is in addition to vs_scalein_timeout_for_upgrade in se_group.
- Field introduced in 17.1.1.
- Default value when not specified in API or module is interpreted by Avi Controller as 5.
upgrade_lease_time:
description:
- Number of upgrade_lease_time.
- Default value when not specified in API or module is interpreted by Avi Controller as 360.
url:
description:
- Avi controller URL of the object.
uuid:
description:
- Unique object identifier of the object.
vnic_op_fail_time:
description:
- Number of vnic_op_fail_time.
- Default value when not specified in API or module is interpreted by Avi Controller as 180.
vs_apic_scaleout_timeout:
description:
- Time to wait for the scaled out se to become ready before marking the scaleout done, applies to apic configuration only.
- Default value when not specified in API or module is interpreted by Avi Controller as 360.
vs_awaiting_se_timeout:
description:
- Number of vs_awaiting_se_timeout.
- Default value when not specified in API or module is interpreted by Avi Controller as 60.
vs_key_rotate_period:
description:
- Period for rotate vs keys job.
- Allowed values are 1-1051200.
- Special values are 0 - 'disabled'.
- Default value when not specified in API or module is interpreted by Avi Controller as 360.
vs_scaleout_ready_check_interval:
description:
- Interval for checking scaleout_ready status while controller is waiting for scaleoutready rpc from the service engine.
- Field introduced in 18.2.2.
- Default value when not specified in API or module is interpreted by Avi Controller as 60.
version_added: "2.9"
vs_se_attach_ip_fail:
description:
- Time to wait before marking attach ip operation on an se as failed.
- Field introduced in 17.2.2.
- Default value when not specified in API or module is interpreted by Avi Controller as 600.
version_added: "2.5"
vs_se_bootup_fail:
description:
- Number of vs_se_bootup_fail.
- Default value when not specified in API or module is interpreted by Avi Controller as 480.
vs_se_create_fail:
description:
- Number of vs_se_create_fail.
- Default value when not specified in API or module is interpreted by Avi Controller as 1500.
vs_se_ping_fail:
description:
- Number of vs_se_ping_fail.
- Default value when not specified in API or module is interpreted by Avi Controller as 60.
vs_se_vnic_fail:
description:
- Number of vs_se_vnic_fail.
- Default value when not specified in API or module is interpreted by Avi Controller as 300.
vs_se_vnic_ip_fail:
description:
- Number of vs_se_vnic_ip_fail.
- Default value when not specified in API or module is interpreted by Avi Controller as 120.
warmstart_se_reconnect_wait_time:
description:
- Number of warmstart_se_reconnect_wait_time.
- Default value when not specified in API or module is interpreted by Avi Controller as 480.
warmstart_vs_resync_wait_time:
description:
- Timeout for warmstart vs resync.
- Field introduced in 18.1.4, 18.2.1.
- Default value when not specified in API or module is interpreted by Avi Controller as 300.
version_added: "2.9"
extends_documentation_fragment:
- avi
'''
EXAMPLES = """
- name: Example to create ControllerProperties object
avi_controllerproperties:
controller: 10.10.25.42
username: admin
password: something
state: present
name: sample_controllerproperties
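# A hypothetical second task (illustrative values only) showing a partial
# update via HTTP PATCH instead of a full PUT replace:
- name: Example to patch a single ControllerProperties field
  avi_controllerproperties:
    controller: 10.10.25.42
    username: admin
    password: something
    state: present
    avi_api_update_method: patch
    avi_api_patch_op: replace
    api_idle_timeout: 30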
"""
RETURN = '''
obj:
description: ControllerProperties (api/controllerproperties) object
returned: success, changed
type: dict
'''
from ansible.module_utils.basic import AnsibleModule
try:
from ansible.module_utils.network.avi.avi import (
avi_common_argument_spec, avi_ansible_api, HAS_AVI)
except ImportError:
HAS_AVI = False
def main():
argument_specs = dict(
state=dict(default='present',
choices=['absent', 'present']),
avi_api_update_method=dict(default='put',
choices=['put', 'patch']),
avi_api_patch_op=dict(choices=['add', 'replace', 'delete']),
allow_ip_forwarding=dict(type='bool',),
allow_unauthenticated_apis=dict(type='bool',),
allow_unauthenticated_nodes=dict(type='bool',),
api_idle_timeout=dict(type='int',),
api_perf_logging_threshold=dict(type='int',),
appviewx_compat_mode=dict(type='bool',),
attach_ip_retry_interval=dict(type='int',),
attach_ip_retry_limit=dict(type='int',),
bm_use_ansible=dict(type='bool',),
cleanup_expired_authtoken_timeout_period=dict(type='int',),
cleanup_sessions_timeout_period=dict(type='int',),
cloud_reconcile=dict(type='bool',),
cluster_ip_gratuitous_arp_period=dict(type='int',),
consistency_check_timeout_period=dict(type='int',),
crashed_se_reboot=dict(type='int',),
dead_se_detection_timer=dict(type='int',),
dns_refresh_period=dict(type='int',),
dummy=dict(type='int',),
enable_api_sharding=dict(type='bool',),
enable_memory_balancer=dict(type='bool',),
fatal_error_lease_time=dict(type='int',),
max_dead_se_in_grp=dict(type='int',),
max_pcap_per_tenant=dict(type='int',),
max_seq_attach_ip_failures=dict(type='int',),
max_seq_vnic_failures=dict(type='int',),
persistence_key_rotate_period=dict(type='int',),
portal_token=dict(type='str', no_log=True,),
process_locked_useraccounts_timeout_period=dict(type='int',),
process_pki_profile_timeout_period=dict(type='int',),
query_host_fail=dict(type='int',),
safenet_hsm_version=dict(type='str',),
se_create_timeout=dict(type='int',),
se_failover_attempt_interval=dict(type='int',),
se_from_marketplace=dict(type='str',),
se_offline_del=dict(type='int',),
se_vnic_cooldown=dict(type='int',),
secure_channel_cleanup_timeout=dict(type='int',),
secure_channel_controller_token_timeout=dict(type='int',),
secure_channel_se_token_timeout=dict(type='int',),
seupgrade_fabric_pool_size=dict(type='int',),
seupgrade_segroup_min_dead_timeout=dict(type='int',),
ssl_certificate_expiry_warning_days=dict(type='list',),
unresponsive_se_reboot=dict(type='int',),
upgrade_dns_ttl=dict(type='int',),
upgrade_lease_time=dict(type='int',),
url=dict(type='str',),
uuid=dict(type='str',),
vnic_op_fail_time=dict(type='int',),
vs_apic_scaleout_timeout=dict(type='int',),
vs_awaiting_se_timeout=dict(type='int',),
vs_key_rotate_period=dict(type='int',),
vs_scaleout_ready_check_interval=dict(type='int',),
vs_se_attach_ip_fail=dict(type='int',),
vs_se_bootup_fail=dict(type='int',),
vs_se_create_fail=dict(type='int',),
vs_se_ping_fail=dict(type='int',),
vs_se_vnic_fail=dict(type='int',),
vs_se_vnic_ip_fail=dict(type='int',),
warmstart_se_reconnect_wait_time=dict(type='int',),
warmstart_vs_resync_wait_time=dict(type='int',),
)
argument_specs.update(avi_common_argument_spec())
module = AnsibleModule(
argument_spec=argument_specs, supports_check_mode=True)
if not HAS_AVI:
return module.fail_json(msg=(
'Avi python API SDK (avisdk>=17.1) or requests is not installed. '
'For more details visit https://github.com/avinetworks/sdk.'))
return avi_ansible_api(module, 'controllerproperties',
set(['portal_token']))
if __name__ == '__main__':
main()
|
{
"content_hash": "4fe192c7c9eaae69a273541d1dc8360d",
"timestamp": "",
"source": "github",
"line_count": 429,
"max_line_length": 134,
"avg_line_length": 46.137529137529135,
"alnum_prop": 0.6325973829131512,
"repo_name": "thaim/ansible",
"id": "f88787ed24dc67d31fb1a1ef2a5843aa9b1a67c4",
"size": "20130",
"binary": false,
"copies": "27",
"ref": "refs/heads/fix-broken-link",
"path": "lib/ansible/modules/network/avi/avi_controllerproperties.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "7"
},
{
"name": "Shell",
"bytes": "246"
}
],
"symlink_target": ""
}
|
from scrapy.contrib.spiders.init import InitSpider
from scrapy.http import Request
from scrapy.selector import Selector
from decouple import config
import re
import json
import requests
from urlparse import urljoin
from scrapy.utils.project import get_project_settings as Settings
STGS = Settings()
CITY_FROM = 'REC'
CITY_TO = 'RIO'
NUMBER_ADULTS = 2
DATE_GO = '2014-09-20'
DATE_BACK = '2014-09-25'
class JSCallError(Exception):
def __init__(self, message):
super(JSCallError, self).__init__(message)
        self.message = u'Error in the function call.'
def response_to_file(name, response):
with open(name, 'wb') as f:
f.write(response.body)
class DecolarSpider(InitSpider):
name = "decolar"
allowed_domains = ["decolar.com.br"]
start_urls = [
'http://www.decolar.com/shop/flights/results/roundtrip/REC/RIO/2014-09-20/2014-09-25/1/0/0',
]
def init_request(self):
city1 = CITY_FROM
city2 = CITY_TO
date_go = DATE_GO
date_back = DATE_BACK
number_adults = NUMBER_ADULTS
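        # results URL pattern: .../roundtrip/<from>/<to>/<go-date>/<back-date>/
        # <adults>/0/0 (the trailing zeros are presumably children/infants)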
u = 'http://www.decolar.com/shop/flights/results/roundtrip/{0}' \
'/{1}/{2}/{3}/{4}/0/0'.format(city1, city2, date_go, date_back,
number_adults, )
return Request(url=u, callback=self.get_url, meta={'referer': u, })
def get_url(self, response):
hxs = Selector(response)
scripts = hxs.xpath('//script[contains(text(), "search :")]')
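        # The results page embeds the polling endpoint in an inline script;
        # grab the first quoted path on its fourth line (brittle, position-based).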
url_code_line = scripts[0].extract().split('\n')[3]
url_part = re.findall(r"'(.*?)'", url_code_line)[0]
nurl = urljoin(self.start_urls[0], url_part)
referer = response.meta['referer']
headers = {
'User-Agent': STGS['USER_AGENT'],
"Content-Type": "application/json",
'X-Requested-With': 'XMLHttpRequest',
"Referer": referer,
}
        try:
            post = requests.get(nurl, headers=headers)
        except requests.RequestException as e:
            # surface transport failures as the module's own error type
            raise JSCallError(str(e))
consegui = post.json()
|
{
"content_hash": "e7d44dc595caa673e1fcaf05e60f2feb",
"timestamp": "",
"source": "github",
"line_count": 76,
"max_line_length": 100,
"avg_line_length": 27.842105263157894,
"alnum_prop": 0.5945179584120983,
"repo_name": "arthuralvim/palestra_pug_scrapy",
"id": "63c84c6a2624870565e307ba5d1bc256e9407969",
"size": "2143",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "passagens/passagens/spiders/decolar.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "9191"
}
],
"symlink_target": ""
}
|
from django.contrib.auth import get_user_model
from django.core.urlresolvers import reverse
from django.test import TestCase, Client, override_settings
from mock import patch
from buildservice.models import Build, Repository
from buildservice.utils.testing import create_user_token
@override_settings(BUILDSERVICE_API_KEY='the_key')
class UpdateBuildStatusTestCase(TestCase):
def setUp(self):
self.client = Client()
self.dummy_url = reverse('api_build_status', args=('unknown/repo', '42'))
self.repo = Repository.objects.create(name='my/repo')
self.user = get_user_model().objects.create_user('wiqheq', password='ttt')
self.repo.users.add(self.user)
self.build = Build.objects.create(
repository=self.repo, branch='master',
sha='0000', pusher_name='mvdb'
)
self.url = reverse('api_build_status', args=('my/repo', self.build.number))
def test_get(self):
resp = self.client.get(self.dummy_url)
self.assertEqual(resp.status_code, 405)
def test_post_not_json(self):
resp = self.client.post(
self.dummy_url, data='hello',
content_type="application/json"
)
self.assertEqual(resp.status_code, 400)
def test_post_missing_status(self):
resp = self.client.post(
self.dummy_url + '?api_key=the_key', data='{"key": "value"}',
content_type="application/json"
)
self.assertEqual(resp.status_code, 400)
self.assertEqual(resp.json(), {'error': 'Missing status field.'})
def test_post_json_not_dict(self):
resp = self.client.post(
self.dummy_url + '?api_key=the_key', data='[1, 2, 3]',
content_type="application/json"
)
self.assertEqual(resp.status_code, 400)
self.assertEqual(resp.json(), {'error': 'Missing status field.'})
def test_post_no_api_key(self):
resp = self.client.post(
self.dummy_url, data='{"status": "success"}',
content_type="application/json"
)
self.assertEqual(resp.status_code, 401)
def test_post_unknown_build(self):
resp = self.client.post(
self.dummy_url + '?api_key=the_key', data='{"status": "success"}',
content_type="application/json"
)
self.assertEqual(resp.status_code, 404)
def test_post_missing_token(self):
resp = self.client.post(
self.url + '?api_key=the_key', data='{"status": "something"}',
content_type="application/json"
)
self.assertEqual(resp.status_code, 400)
self.assertEqual(resp.json(), {'error': 'No token.'})
def test_post_bad_status(self):
create_user_token(self.user, self.repo)
resp = self.client.post(
self.url + '?api_key=the_key', data='{"status": "something"}',
content_type="application/json"
)
self.assertEqual(resp.status_code, 400)
self.assertEqual(resp.json(), {'error': 'Invalid status.'})
@patch('buildservice.utils.github.create_status')
def test_post_ok(self, create_status):
token = create_user_token(self.user, self.repo)
resp = self.client.post(
self.url + '?api_key=the_key', data='{"status": "success"}',
content_type="application/json"
)
self.assertEqual(resp.status_code, 200)
self.build.refresh_from_db()
self.assertTrue(self.build.is_success)
create_status.assert_called_with(
token.value, 'my/repo', self.build.sha,
state='success', target_url=self.build.url
)
|
{
"content_hash": "8a7798a319af3506902205142bdf616a",
"timestamp": "",
"source": "github",
"line_count": 96,
"max_line_length": 83,
"avg_line_length": 38.177083333333336,
"alnum_prop": 0.606275579809004,
"repo_name": "m-vdb/github-buildservice-boilerplate",
"id": "2c15f5ef70e7cf4d9931b834e45d8ddc6a06fe9b",
"size": "3665",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "buildservice/tests/views/test_api.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "1471"
},
{
"name": "Makefile",
"bytes": "312"
},
{
"name": "Python",
"bytes": "22923"
},
{
"name": "Shell",
"bytes": "137"
},
{
"name": "Smarty",
"bytes": "206"
}
],
"symlink_target": ""
}
|
"""
Recurrent Conditional GAN in keras.
It only contains the base class; subclasses need to create their own architecture.
"""
from abc import abstractmethod
import tensorflow as tf
class RCGAN(object):
    def __init__(self, input_dim, window_length, num_classes, code_size=64, learning_rate=1e-4, batch_size=32,
                 tensorboard=False):
self.input_dim = input_dim
self.window_length = window_length
self.num_classes = num_classes
self.code_size = code_size
        self.learning_rate = learning_rate
self.batch_size = batch_size
self.tensorboard = tensorboard
self.generator = self._create_generator()
self.discriminator = self._create_discriminator()
self.discriminator_generator = self._combine_generator_discriminator()
@abstractmethod
def _create_discriminator(self):
""" This class also needs to make sure the model is compiled with optimizer """
raise NotImplementedError('Subclass must implement create discriminator')
@abstractmethod
def _create_generator(self):
""" This class also needs to make sure the model is compiled with optimizer """
raise NotImplementedError('Subclass must implement create generator')
@abstractmethod
def _combine_generator_discriminator(self):
raise NotImplementedError('Subclass must implement combine generator and discriminator')
def build_summary(self):
if self.tensorboard:
self.dis_loss = tf.Variable(0.)
tf.summary.scalar('dis_loss', self.dis_loss)
self.gen_loss = tf.Variable(0.)
tf.summary.scalar('gen_loss', self.gen_loss)
self.summary_ops = tf.summary.merge_all()
@abstractmethod
def train(self, train_samples, training_labels, num_epoch=5, log_step=50, verbose=True,
summary_path='./summary/rcgan'):
pass
@abstractmethod
def generate(self, codes, labels):
pass
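
# Illustrative only: a minimal, hypothetical subclass showing which hooks a
# concrete implementation must provide. It uses trivial dense layers purely
# as a sketch; a real RCGAN would use recurrent layers and condition the
# discriminator on the labels as well. train()/generate() are left as-is
# (the base class does not use ABCMeta, so instantiation still works).
class DenseRCGAN(RCGAN):
    def _create_generator(self):
        # maps a (code + one-hot label) vector to a window_length x input_dim sequence
        model = tf.keras.models.Sequential([
            tf.keras.layers.Dense(self.window_length * self.input_dim, activation='tanh',
                                  input_shape=(self.code_size + self.num_classes,)),
            tf.keras.layers.Reshape((self.window_length, self.input_dim)),
        ])
        model.compile(optimizer=tf.keras.optimizers.Adam(self.learning_rate),
                      loss='binary_crossentropy')
        return model

    def _create_discriminator(self):
        # scores a sequence as real (1) or generated (0)
        model = tf.keras.models.Sequential([
            tf.keras.layers.Flatten(input_shape=(self.window_length, self.input_dim)),
            tf.keras.layers.Dense(1, activation='sigmoid'),
        ])
        model.compile(optimizer=tf.keras.optimizers.Adam(self.learning_rate),
                      loss='binary_crossentropy')
        return model

    def _combine_generator_discriminator(self):
        # stacked model used to train the generator; the discriminator is frozen here
        self.discriminator.trainable = False
        code = tf.keras.layers.Input(shape=(self.code_size + self.num_classes,))
        score = self.discriminator(self.generator(code))
        combined = tf.keras.models.Model(code, score)
        combined.compile(optimizer=tf.keras.optimizers.Adam(self.learning_rate),
                         loss='binary_crossentropy')
        return combined

# e.g. DenseRCGAN(input_dim=3, window_length=24, num_classes=5).generator.summary()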
|
{
"content_hash": "2811e13ac6429fdfbe04496dd0e3eb6b",
"timestamp": "",
"source": "github",
"line_count": 54,
"max_line_length": 110,
"avg_line_length": 36.833333333333336,
"alnum_prop": 0.6681749622926093,
"repo_name": "vermouth1992/tf-playground",
"id": "6b850e469af100678a08d64f36af98b4b68cb196",
"size": "1989",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "gan/rcgan.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "31024"
},
{
"name": "Python",
"bytes": "53839"
}
],
"symlink_target": ""
}
|
import pytest
import platform
import functools
from azure.core.exceptions import HttpResponseError, ClientAuthenticationError
from azure.core.credentials import AzureKeyCredential
from testcase import TextAnalyticsTest, TextAnalyticsPreparer
from testcase import TextAnalyticsClientPreparer as _TextAnalyticsClientPreparer
from devtools_testutils import recorded_by_proxy
from azure.ai.textanalytics import (
DetectLanguageInput,
TextAnalyticsClient,
DetectLanguageInput,
VERSION,
TextAnalyticsApiVersion,
)
# pre-apply the client_cls positional argument so it needn't be explicitly passed below
TextAnalyticsClientPreparer = functools.partial(_TextAnalyticsClientPreparer, TextAnalyticsClient)
class TestDetectLanguage(TextAnalyticsTest):
@TextAnalyticsPreparer()
@TextAnalyticsClientPreparer()
@recorded_by_proxy
def test_no_single_input(self, client):
with pytest.raises(TypeError):
response = client.detect_language("hello world")
@TextAnalyticsPreparer()
@TextAnalyticsClientPreparer()
@recorded_by_proxy
def test_all_successful_passing_dict(self, client):
docs = [{"id": "1", "text": "I should take my cat to the veterinarian."},
{"id": "2", "text": "Este es un document escrito en Español."},
{"id": "3", "text": "猫は幸せ"},
{"id": "4", "text": "Fahrt nach Stuttgart und dann zum Hotel zu Fu."}]
response = client.detect_language(docs, show_stats=True)
assert response[0].primary_language.name == "English"
# assert response[1].primary_language.name == "Spanish" # https://msazure.visualstudio.com/Cognitive%20Services/_workitems/edit/10363878
assert response[2].primary_language.name == "Japanese"
assert response[3].primary_language.name == "German"
assert response[0].primary_language.iso6391_name == "en"
# assert response[1].primary_language.iso6391_name == "es" # https://msazure.visualstudio.com/Cognitive%20Services/_workitems/edit/10363878
assert response[2].primary_language.iso6391_name == "ja"
assert response[3].primary_language.iso6391_name == "de"
for doc in response:
assert doc.id is not None
assert doc.statistics is not None
assert doc.primary_language.confidence_score is not None
@TextAnalyticsPreparer()
@TextAnalyticsClientPreparer()
@recorded_by_proxy
def test_all_successful_passing_text_document_input(self, client):
docs = [
DetectLanguageInput(id="1", text="I should take my cat to the veterinarian"),
DetectLanguageInput(id="2", text="Este es un document escrito en Español."),
DetectLanguageInput(id="3", text="猫は幸せ"),
DetectLanguageInput(id="4", text="Fahrt nach Stuttgart und dann zum Hotel zu Fu.")
]
response = client.detect_language(docs)
assert response[0].primary_language.name == "English"
# assert response[1].primary_language.name == "Spanish" # https://msazure.visualstudio.com/Cognitive%20Services/_workitems/edit/10363878
assert response[2].primary_language.name == "Japanese"
assert response[3].primary_language.name == "German"
assert response[0].primary_language.iso6391_name == "en"
# assert response[1].primary_language.iso6391_name == "es" # https://msazure.visualstudio.com/Cognitive%20Services/_workitems/edit/10363878
assert response[2].primary_language.iso6391_name == "ja"
assert response[3].primary_language.iso6391_name == "de"
for doc in response:
assert doc.primary_language.confidence_score is not None
@TextAnalyticsPreparer()
@TextAnalyticsClientPreparer()
@recorded_by_proxy
def test_passing_only_string(self, client):
docs = [
"I should take my cat to the veterinarian.",
"Este es un document escrito en Español.",
"猫は幸せ",
"Fahrt nach Stuttgart und dann zum Hotel zu Fu.",
""
]
response = client.detect_language(docs)
assert response[0].primary_language.name == "English"
# assert response[1].primary_language.name == "Spanish" # https://msazure.visualstudio.com/Cognitive%20Services/_workitems/edit/10363878
assert response[2].primary_language.name == "Japanese"
assert response[3].primary_language.name == "German"
assert response[4].is_error
@TextAnalyticsPreparer()
@TextAnalyticsClientPreparer()
@recorded_by_proxy
def test_input_with_some_errors(self, client):
docs = [{"id": "1", "country_hint": "United States", "text": "I should take my cat to the veterinarian."},
{"id": "2", "text": "Este es un document escrito en Español."},
{"id": "3", "text": ""},
{"id": "4", "text": "Fahrt nach Stuttgart und dann zum Hotel zu Fu."}]
response = client.detect_language(docs)
assert response[0].is_error
assert not response[1].is_error
assert response[2].is_error
assert not response[3].is_error
@TextAnalyticsPreparer()
@TextAnalyticsClientPreparer()
@recorded_by_proxy
def test_input_with_all_errors(self, client):
text = ""
for _ in range(5121):
text += "x"
docs = [{"id": "1", "text": ""},
{"id": "2", "text": ""},
{"id": "3", "text": ""},
{"id": "4", "text": text}]
response = client.detect_language(docs)
for resp in response:
assert resp.is_error
@TextAnalyticsPreparer()
@TextAnalyticsClientPreparer()
@recorded_by_proxy
def test_output_same_order_as_input(self, client):
docs = [
DetectLanguageInput(id="1", text="one"),
DetectLanguageInput(id="2", text="two"),
DetectLanguageInput(id="3", text="three"),
DetectLanguageInput(id="4", text="four"),
DetectLanguageInput(id="5", text="five")
]
response = client.detect_language(docs)
for idx, doc in enumerate(response):
assert str(idx + 1) == doc.id
@TextAnalyticsPreparer()
@TextAnalyticsClientPreparer(client_kwargs={"textanalytics_test_api_key": ""})
@recorded_by_proxy
def test_empty_credential_class(self, client):
with pytest.raises(ClientAuthenticationError):
response = client.detect_language(
["This is written in English."]
)
@TextAnalyticsPreparer()
@TextAnalyticsClientPreparer(client_kwargs={"textanalytics_test_api_key": "xxxxxxxxxxxx"})
@recorded_by_proxy
def test_bad_credentials(self, client):
with pytest.raises(ClientAuthenticationError):
response = client.detect_language(
["This is written in English."]
)
@TextAnalyticsPreparer()
@TextAnalyticsClientPreparer()
@recorded_by_proxy
def test_bad_document_input(self, client):
docs = "This is the wrong type"
with pytest.raises(TypeError):
response = client.detect_language(docs)
@TextAnalyticsPreparer()
@TextAnalyticsClientPreparer()
@recorded_by_proxy
def test_mixing_inputs(self, client):
docs = [
{"id": "1", "text": "Microsoft was founded by Bill Gates and Paul Allen."},
DetectLanguageInput(id="2", text="I did not like the hotel we stayed at. It was too expensive."),
"You cannot mix string input with the above inputs"
]
with pytest.raises(TypeError):
response = client.detect_language(docs)
@TextAnalyticsPreparer()
@TextAnalyticsClientPreparer()
@recorded_by_proxy
def test_out_of_order_ids(self, client):
docs = [{"id": "56", "text": ":)"},
{"id": "0", "text": ":("},
{"id": "22", "text": ""},
{"id": "19", "text": ":P"},
{"id": "1", "text": ":D"}]
response = client.detect_language(docs)
in_order = ["56", "0", "22", "19", "1"]
for idx, resp in enumerate(response):
assert resp.id == in_order[idx]
@TextAnalyticsPreparer()
@TextAnalyticsClientPreparer()
@recorded_by_proxy
def test_show_stats_and_model_version(self, client):
def callback(response):
assert response is not None
assert response.model_version
assert response.raw_response is not None
assert response.statistics.document_count == 5
assert response.statistics.transaction_count == 4
assert response.statistics.valid_document_count == 4
assert response.statistics.erroneous_document_count == 1
docs = [{"id": "56", "text": ":)"},
{"id": "0", "text": ":("},
{"id": "22", "text": ""},
{"id": "19", "text": ":P"},
{"id": "1", "text": ":D"}]
response = client.detect_language(
docs,
show_stats=True,
model_version="latest",
raw_response_hook=callback
)
@TextAnalyticsPreparer()
@TextAnalyticsClientPreparer()
@recorded_by_proxy
def test_batch_size_over_limit(self, client):
docs = ["hello world"] * 1050
with pytest.raises(HttpResponseError):
response = client.detect_language(docs)
@TextAnalyticsPreparer()
@TextAnalyticsClientPreparer()
@recorded_by_proxy
def test_whole_batch_country_hint(self, client):
def callback(resp):
country_str = "\"countryHint\": \"CA\""
country = resp.http_request.body.count(country_str)
assert country == 3
docs = [
"This was the best day of my life.",
"I did not like the hotel we stayed at. It was too expensive.",
"The restaurant was not as good as I hoped."
]
response = client.detect_language(docs, country_hint="CA", raw_response_hook=callback)
@TextAnalyticsPreparer()
@TextAnalyticsClientPreparer()
@recorded_by_proxy
def test_whole_batch_dont_use_country_hint(self, client):
def callback(resp):
country_str = "\"countryHint\": \"\""
country = resp.http_request.body.count(country_str)
assert country == 3
docs = [
"This was the best day of my life.",
"I did not like the hotel we stayed at. It was too expensive.",
"The restaurant was not as good as I hoped."
]
response = client.detect_language(docs, country_hint="", raw_response_hook=callback)
@TextAnalyticsPreparer()
@TextAnalyticsClientPreparer()
@recorded_by_proxy
def test_per_item_dont_use_country_hint(self, client):
def callback(resp):
country_str = "\"countryHint\": \"\""
country = resp.http_request.body.count(country_str)
assert country == 2
country_str = "\"countryHint\": \"US\""
country = resp.http_request.body.count(country_str)
assert country == 1
docs = [{"id": "1", "country_hint": "", "text": "I will go to the park."},
{"id": "2", "country_hint": "", "text": "I did not like the hotel we stayed at."},
{"id": "3", "text": "The restaurant had really good food."}]
response = client.detect_language(docs, raw_response_hook=callback)
@TextAnalyticsPreparer()
@TextAnalyticsClientPreparer()
@recorded_by_proxy
def test_whole_batch_country_hint_and_obj_input(self, client):
def callback(resp):
country_str = "\"countryHint\": \"CA\""
country = resp.http_request.body.count(country_str)
assert country == 3
docs = [
DetectLanguageInput(id="1", text="I should take my cat to the veterinarian."),
DetectLanguageInput(id="2", text="Este es un document escrito en Español."),
DetectLanguageInput(id="3", text="猫は幸せ"),
]
response = client.detect_language(docs, country_hint="CA", raw_response_hook=callback)
@TextAnalyticsPreparer()
@TextAnalyticsClientPreparer()
@recorded_by_proxy
def test_whole_batch_country_hint_and_dict_input(self, client):
def callback(resp):
country_str = "\"countryHint\": \"CA\""
country = resp.http_request.body.count(country_str)
assert country == 3
docs = [{"id": "1", "text": "I will go to the park."},
{"id": "2", "text": "I did not like the hotel we stayed at."},
{"id": "3", "text": "The restaurant had really good food."}]
response = client.detect_language(docs, country_hint="CA", raw_response_hook=callback)
@TextAnalyticsPreparer()
@TextAnalyticsClientPreparer()
@recorded_by_proxy
def test_whole_batch_country_hint_and_obj_per_item_hints(self, client):
def callback(resp):
country_str = "\"countryHint\": \"CA\""
country = resp.http_request.body.count(country_str)
assert country == 2
country_str = "\"countryHint\": \"US\""
country = resp.http_request.body.count(country_str)
assert country == 1
docs = [
DetectLanguageInput(id="1", text="I should take my cat to the veterinarian.", country_hint="CA"),
DetectLanguageInput(id="4", text="Este es un document escrito en Español.", country_hint="CA"),
DetectLanguageInput(id="3", text="猫は幸せ"),
]
response = client.detect_language(docs, country_hint="US", raw_response_hook=callback)
@TextAnalyticsPreparer()
@TextAnalyticsClientPreparer()
@recorded_by_proxy
def test_whole_batch_country_hint_and_dict_per_item_hints(self, client):
def callback(resp):
country_str = "\"countryHint\": \"CA\""
country = resp.http_request.body.count(country_str)
assert country == 1
country_str = "\"countryHint\": \"US\""
country = resp.http_request.body.count(country_str)
assert country == 2
docs = [{"id": "1", "country_hint": "US", "text": "I will go to the park."},
{"id": "2", "country_hint": "US", "text": "I did not like the hotel we stayed at."},
{"id": "3", "text": "The restaurant had really good food."}]
response = client.detect_language(docs, country_hint="CA", raw_response_hook=callback)
@TextAnalyticsPreparer()
@TextAnalyticsClientPreparer(client_kwargs={"default_country_hint": "CA"})
@recorded_by_proxy
def test_client_passed_default_country_hint(self, client):
def callback(resp):
country_str = "\"countryHint\": \"CA\""
country = resp.http_request.body.count(country_str)
assert country == 3
def callback_2(resp):
country_str = "\"countryHint\": \"DE\""
country = resp.http_request.body.count(country_str)
assert country == 3
docs = [{"id": "1", "text": "I will go to the park."},
{"id": "2", "text": "I did not like the hotel we stayed at."},
{"id": "3", "text": "The restaurant had really good food."}]
response = client.detect_language(docs, raw_response_hook=callback)
response = client.detect_language(docs, country_hint="DE", raw_response_hook=callback_2)
response = client.detect_language(docs, raw_response_hook=callback)
@TextAnalyticsPreparer()
@recorded_by_proxy
def test_rotate_subscription_key(self, textanalytics_test_endpoint, textanalytics_test_api_key):
credential = AzureKeyCredential(textanalytics_test_api_key)
client = TextAnalyticsClient(textanalytics_test_endpoint, credential)
docs = [{"id": "1", "text": "I will go to the park."},
{"id": "2", "text": "I did not like the hotel we stayed at."},
{"id": "3", "text": "The restaurant had really good food."}]
response = client.detect_language(docs)
assert response is not None
credential.update("xxx") # Make authentication fail
with pytest.raises(ClientAuthenticationError):
response = client.detect_language(docs)
credential.update(textanalytics_test_api_key) # Authenticate successfully again
response = client.detect_language(docs)
assert response is not None
@TextAnalyticsPreparer()
@TextAnalyticsClientPreparer()
@recorded_by_proxy
def test_user_agent(self, client):
def callback(resp):
assert "azsdk-python-ai-textanalytics/{} Python/{} ({})".format(
VERSION, platform.python_version(), platform.platform()) in \
resp.http_request.headers["User-Agent"]
docs = [{"id": "1", "text": "I will go to the park."},
{"id": "2", "text": "I did not like the hotel we stayed at."},
{"id": "3", "text": "The restaurant had really good food."}]
response = client.detect_language(docs, raw_response_hook=callback)
@TextAnalyticsPreparer()
@TextAnalyticsClientPreparer()
@recorded_by_proxy
def test_document_attribute_error_no_result_attribute(self, client):
docs = [{"id": "1", "text": ""}]
response = client.detect_language(docs)
# Attributes on DocumentError
assert response[0].is_error
assert response[0].id == "1"
assert response[0].error is not None
# Result attribute not on DocumentError, custom error message
try:
primary_language = response[0].primary_language
except AttributeError as custom_error:
assert custom_error.args[0] == \
'\'DocumentError\' object has no attribute \'primary_language\'. ' \
'The service was unable to process this document:\nDocument Id: 1\nError: ' \
'InvalidDocument - Document text is empty.\n'
@TextAnalyticsPreparer()
@TextAnalyticsClientPreparer()
@recorded_by_proxy
def test_document_attribute_error_nonexistent_attribute(self, client):
docs = [{"id": "1", "text": ""}]
response = client.detect_language(docs)
# Attribute not found on DocumentError or result obj, default behavior/message
try:
primary_language = response[0].attribute_not_on_result_or_error
except AttributeError as default_behavior:
assert default_behavior.args[0] == '\'DocumentError\' object has no attribute \'attribute_not_on_result_or_error\''
@TextAnalyticsPreparer()
@TextAnalyticsClientPreparer()
@recorded_by_proxy
def test_bad_model_version_error(self, client):
docs = [{"id": "1", "language": "english", "text": "I did not like the hotel we stayed at."}]
try:
result = client.detect_language(docs, model_version="bad")
except HttpResponseError as err:
assert err.error.code == "ModelVersionIncorrect"
assert err.error.message is not None
@TextAnalyticsPreparer()
@TextAnalyticsClientPreparer()
@recorded_by_proxy
def test_document_errors(self, client):
text = ""
for _ in range(5121):
text += "x"
docs = [{"id": "1", "text": ""},
{"id": "2", "text": text}]
doc_errors = client.detect_language(docs)
assert doc_errors[0].error.code == "InvalidDocument"
assert doc_errors[0].error.message is not None
assert doc_errors[1].error.code == "InvalidDocument"
assert doc_errors[1].error.message is not None
@TextAnalyticsPreparer()
@TextAnalyticsClientPreparer()
@recorded_by_proxy
def test_document_warnings(self, client):
        # No warnings are actually returned for detect_language. Will update when the service adds them.
docs = [
{"id": "1", "text": "This won't actually create a warning :'("},
]
result = client.detect_language(docs)
for doc in result:
doc_warnings = doc.warnings
assert len(doc_warnings) == 0
@TextAnalyticsPreparer()
@TextAnalyticsClientPreparer()
@recorded_by_proxy
def test_not_passing_list_for_docs(self, client):
docs = {"id": "1", "text": "hello world"}
with pytest.raises(TypeError) as excinfo:
client.detect_language(docs)
assert "Input documents cannot be a dict" in str(excinfo.value)
@TextAnalyticsPreparer()
@TextAnalyticsClientPreparer()
@recorded_by_proxy
def test_missing_input_records_error(self, client):
docs = []
with pytest.raises(ValueError) as excinfo:
client.detect_language(docs)
assert "Input documents can not be empty or None" in str(excinfo.value)
@TextAnalyticsPreparer()
@TextAnalyticsClientPreparer()
@recorded_by_proxy
def test_passing_none_docs(self, client):
with pytest.raises(ValueError) as excinfo:
client.detect_language(None)
assert "Input documents can not be empty or None" in str(excinfo.value)
@TextAnalyticsPreparer()
@TextAnalyticsClientPreparer()
@recorded_by_proxy
def test_duplicate_ids_error(self, client):
# Duplicate Ids
docs = [{"id": "1", "text": "hello world"},
{"id": "1", "text": "I did not like the hotel we stayed at."}]
try:
result = client.detect_language(docs)
except HttpResponseError as err:
assert err.error.code == "InvalidDocument"
assert err.error.message is not None
@TextAnalyticsPreparer()
@TextAnalyticsClientPreparer()
@recorded_by_proxy
def test_batch_size_over_limit_error(self, client):
# Batch size over limit
docs = ["hello world"] * 1001
try:
response = client.detect_language(docs)
except HttpResponseError as err:
assert err.error.code == "InvalidDocumentBatch"
assert err.error.message is not None
@TextAnalyticsPreparer()
@TextAnalyticsClientPreparer()
@recorded_by_proxy
def test_invalid_country_hint_method(self, client):
docs = [{"id": "1", "text": "hello world"}]
response = client.detect_language(docs, country_hint="United States")
assert response[0].error.code == "InvalidCountryHint"
assert response[0].error.message is not None
@TextAnalyticsPreparer()
@TextAnalyticsClientPreparer()
@recorded_by_proxy
def test_invalid_country_hint_docs(self, client):
docs = [{"id": "1", "country_hint": "United States", "text": "hello world"}]
response = client.detect_language(docs)
assert response[0].error.code == "InvalidCountryHint"
assert response[0].error.message is not None
@TextAnalyticsPreparer()
@recorded_by_proxy
def test_country_hint_none(self, textanalytics_test_endpoint, textanalytics_test_api_key):
client = TextAnalyticsClient(textanalytics_test_endpoint, AzureKeyCredential(textanalytics_test_api_key))
# service will eventually support this and we will not need to send "" for input == "none"
documents = [{"id": "0", "country_hint": "none", "text": "This is written in English."}]
documents2 = [DetectLanguageInput(id="1", country_hint="none", text="This is written in English.")]
def callback(response):
country_str = "\"countryHint\": \"\""
country = response.http_request.body.count(country_str)
assert country == 1
# test dict
result = client.detect_language(documents, raw_response_hook=callback)
# test DetectLanguageInput
result2 = client.detect_language(documents2, raw_response_hook=callback)
# test per-operation
result3 = client.detect_language(documents=["this is written in english"], country_hint="none", raw_response_hook=callback)
# test client default
new_client = TextAnalyticsClient(textanalytics_test_endpoint, AzureKeyCredential(textanalytics_test_api_key), default_country_hint="none")
result4 = new_client.detect_language(documents=["this is written in english"], raw_response_hook=callback)
@TextAnalyticsPreparer()
@TextAnalyticsClientPreparer()
@recorded_by_proxy
def test_country_hint_kwarg(self, client):
def callback(response):
country_str = "\"countryHint\": \"ES\""
assert response.http_request.body.count(country_str) == 1
assert response.model_version is not None
assert response.statistics is not None
res = client.detect_language(
documents=["this is written in english"],
model_version="latest",
show_stats=True,
country_hint="ES",
raw_response_hook=callback
)
@TextAnalyticsPreparer()
@TextAnalyticsClientPreparer()
@recorded_by_proxy
def test_pass_cls(self, client):
def callback(pipeline_response, deserialized, _):
return "cls result"
res = client.detect_language(
documents=["Test passing cls to endpoint"],
cls=callback
)
assert res == "cls result"
@TextAnalyticsPreparer()
@TextAnalyticsClientPreparer(client_kwargs={"api_version": TextAnalyticsApiVersion.V3_0})
@recorded_by_proxy
def test_string_index_type_not_fail_v3(self, client):
# make sure that the addition of the string_index_type kwarg for v3.1-preview.1 doesn't
# cause v3.0 calls to fail
client.detect_language(["please don't fail"])
@TextAnalyticsPreparer()
@TextAnalyticsClientPreparer(client_kwargs={"api_version": TextAnalyticsApiVersion.V3_1})
@recorded_by_proxy
def test_disable_service_logs(self, client):
def callback(resp):
assert resp.http_request.query['loggingOptOut']
client.detect_language(
documents=["Test for logging disable"],
disable_service_logs=True,
raw_response_hook=callback,
)
@TextAnalyticsPreparer()
@TextAnalyticsClientPreparer(client_kwargs={"api_version": TextAnalyticsApiVersion.V2022_05_01})
@recorded_by_proxy
def test_disable_service_logs_body_param(self, client):
def callback(resp):
import json
assert json.loads(resp.http_request.body)['parameters']['loggingOptOut']
client.detect_language(
documents=["Test for logging disable"],
disable_service_logs=True,
raw_response_hook=callback,
)
@TextAnalyticsPreparer()
@TextAnalyticsClientPreparer(client_kwargs={"api_version": "v3.0"})
    def test_language_multiapi_validate_args_v3_0(self, **kwargs):
client = kwargs.pop("client")
with pytest.raises(ValueError) as e:
            res = client.detect_language(["I'm tired"], disable_service_logs=True)
assert str(e.value) == "'disable_service_logs' is not available in API version v3.0. Use service API version v3.1 or newer.\n"
@TextAnalyticsPreparer()
@TextAnalyticsClientPreparer()
@recorded_by_proxy
def test_detect_language_script(self, client):
docs = ["Tumhara naam kya hai?"]
response = client.detect_language(docs, model_version="2022-04-10-preview")
assert response[0].primary_language.script == "Latin"
assert response[0].primary_language.iso6391_name == "hi"
|
{
"content_hash": "1fa8f8fc4a5425d8abb70bb2048e7166",
"timestamp": "",
"source": "github",
"line_count": 672,
"max_line_length": 147,
"avg_line_length": 41.117559523809526,
"alnum_prop": 0.620896818790489,
"repo_name": "Azure/azure-sdk-for-python",
"id": "aa1013abb7a12d7f4282c8cc6cc6167ce150d9f4",
"size": "27829",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sdk/textanalytics/azure-ai-textanalytics/tests/test_detect_language.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
}
|
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import os
import sys
import pip
from .. import click, sync
from ..exceptions import PipToolsError
from ..logging import log
from ..utils import assert_compatible_pip_version, flat_map
# Make sure we're using a compatible version of pip
assert_compatible_pip_version()
DEFAULT_REQUIREMENTS_FILE = 'requirements.txt'
@click.command()
@click.version_option()
@click.option('-n', '--dry-run', is_flag=True, help="Only show what would happen, don't change anything")
@click.option('--force', is_flag=True, help="Proceed even if conflicts are found")
@click.option('-f', '--find-links', multiple=True, help="Look for archives in this directory or on this HTML page", envvar='PIP_FIND_LINKS') # noqa
@click.option('-i', '--index-url', help="Change index URL (defaults to PyPI)", envvar='PIP_INDEX_URL')
@click.option('--extra-index-url', multiple=True, help="Add additional index URL to search", envvar='PIP_EXTRA_INDEX_URL') # noqa
@click.option('--no-index', is_flag=True, help="Ignore package index (only looking at --find-links URLs instead)")
@click.option('-q', '--quiet', default=False, is_flag=True, help="Give less output")
@click.argument('src_files', required=False, type=click.Path(exists=True), nargs=-1)
def cli(dry_run, force, find_links, index_url, extra_index_url, no_index, quiet, src_files):
"""Synchronize virtual environment with requirements.txt."""
if not src_files:
if os.path.exists(DEFAULT_REQUIREMENTS_FILE):
src_files = (DEFAULT_REQUIREMENTS_FILE,)
else:
msg = 'No requirement files given and no {} found in the current directory'
log.error(msg.format(DEFAULT_REQUIREMENTS_FILE))
sys.exit(2)
if any(src_file.endswith('.in') for src_file in src_files):
msg = ('Some input files have the .in extension, which is most likely an error and can '
'cause weird behaviour. You probably meant to use the corresponding *.txt file?')
if force:
log.warning('WARNING: ' + msg)
else:
log.error('ERROR: ' + msg)
sys.exit(2)
requirements = flat_map(lambda src: pip.req.parse_requirements(src, session=True),
src_files)
try:
requirements = sync.merge(requirements, ignore_conflicts=force)
except PipToolsError as e:
log.error(str(e))
sys.exit(2)
installed_dists = pip.get_installed_distributions(skip=[])
to_install, to_uninstall = sync.diff(requirements, installed_dists)
install_flags = []
for link in find_links or []:
install_flags.extend(['-f', link])
if no_index:
install_flags.append('--no-index')
if index_url:
install_flags.extend(['-i', index_url])
if extra_index_url:
for extra_index in extra_index_url:
install_flags.extend(['--extra-index-url', extra_index])
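    # a hypothetical example: with find_links=('./wheels',), no_index=True, and
    # index_url unset, install_flags ends up as ['-f', './wheels', '--no-index'],
    # which is then handed to sync.sync() below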
sys.exit(sync.sync(to_install, to_uninstall, verbose=(not quiet), dry_run=dry_run,
install_flags=install_flags))
|
{
"content_hash": "558a48987c2f67b72edd055cf1f98cc3",
"timestamp": "",
"source": "github",
"line_count": 73,
"max_line_length": 148,
"avg_line_length": 42.84931506849315,
"alnum_prop": 0.6547314578005116,
"repo_name": "nateprewitt/pipenv",
"id": "e1d7f5e534d0948427d599f92440394e5bdd268a",
"size": "3144",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pipenv/patched/piptools/scripts/sync.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "202"
},
{
"name": "PowerShell",
"bytes": "7195"
},
{
"name": "Python",
"bytes": "2589069"
},
{
"name": "Roff",
"bytes": "40754"
}
],
"symlink_target": ""
}
|
import datetime
import dateutil.relativedelta
from django.utils import timezone
import mock
from nose.tools import * # flake8: noqa
import pytest
from urlparse import urlparse
from api.base.settings.defaults import API_BASE
from api_tests.nodes.views.test_node_draft_registration_list import DraftRegistrationTestCase
from api_tests.registrations.filters.test_filters import RegistrationListFilteringMixin
from django.db.models import Q
from framework.auth.core import Auth
from osf.models import MetaSchema, DraftRegistration
from osf_tests.factories import (
EmbargoFactory,
ProjectFactory,
RegistrationFactory,
AuthUserFactory,
CollectionFactory,
DraftRegistrationFactory,
)
from rest_framework import exceptions
from tests.base import ApiTestCase
from website.project.metadata.schemas import LATEST_SCHEMA_VERSION
from website.views import find_bookmark_collection
from website.util import permissions
class TestRegistrationList(ApiTestCase):
def setUp(self):
super(TestRegistrationList, self).setUp()
self.user = AuthUserFactory()
self.project = ProjectFactory(is_public=False, creator=self.user)
self.registration_project = RegistrationFactory(creator=self.user, project=self.project)
self.url = '/{}registrations/'.format(API_BASE)
self.public_project = ProjectFactory(is_public=True, creator=self.user)
self.public_registration_project = RegistrationFactory(creator=self.user, project=self.public_project, is_public=True)
self.user_two = AuthUserFactory()
def test_return_public_registrations_logged_out(self):
res = self.app.get(self.url)
assert_equal(len(res.json['data']), 1)
assert_equal(res.status_code, 200)
assert_equal(res.content_type, 'application/vnd.api+json')
url = res.json['data'][0]['relationships']['registered_from']['links']['related']['href']
assert_equal(urlparse(url).path, '/{}nodes/{}/'.format(API_BASE, self.public_project._id))
def test_return_registrations_logged_in_contributor(self):
res = self.app.get(self.url, auth=self.user.auth)
assert_equal(len(res.json['data']), 2)
assert_equal(res.status_code, 200)
registered_from_one = urlparse(res.json['data'][0]['relationships']['registered_from']['links']['related']['href']).path
registered_from_two = urlparse(res.json['data'][1]['relationships']['registered_from']['links']['related']['href']).path
assert_equal(res.content_type, 'application/vnd.api+json')
assert_items_equal([registered_from_one, registered_from_two],
['/{}nodes/{}/'.format(API_BASE, self.public_project._id),
'/{}nodes/{}/'.format(API_BASE, self.project._id)])
def test_return_registrations_logged_in_non_contributor(self):
res = self.app.get(self.url, auth=self.user_two.auth)
assert_equal(len(res.json['data']), 1)
assert_equal(res.status_code, 200)
registered_from = urlparse(res.json['data'][0]['relationships']['registered_from']['links']['related']['href']).path
assert_equal(res.content_type, 'application/vnd.api+json')
assert_equal(registered_from, '/{}nodes/{}/'.format(API_BASE, self.public_project._id))
def test_total_biographic_contributor_in_registration(self):
user3 = AuthUserFactory()
registration = RegistrationFactory(is_public=True, creator=self.user)
registration.add_contributor(self.user_two, auth=Auth(self.user))
registration.add_contributor(user3, auth=Auth(self.user), visible=False)
registration.save()
registration_url = '/{0}registrations/{1}/?embed=contributors'.format(API_BASE, registration._id)
res = self.app.get(registration_url)
assert_true(res.json['data']['embeds']['contributors']['links']['meta']['total_bibliographic'])
assert_equal(res.json['data']['embeds']['contributors']['links']['meta']['total_bibliographic'], 2)
def test_exclude_nodes_from_registrations_endpoint(self):
res = self.app.get(self.url, auth=self.user.auth)
ids = [each['id'] for each in res.json['data']]
assert_in(self.registration_project._id, ids)
assert_in(self.public_registration_project._id, ids)
assert_not_in(self.public_project._id, ids)
assert_not_in(self.project._id, ids)
class TestRegistrationFiltering(ApiTestCase):
def setUp(self):
super(TestRegistrationFiltering, self).setUp()
self.user_one = AuthUserFactory()
self.user_two = AuthUserFactory()
self.project_one = ProjectFactory(title="Project One", description='Two', is_public=True, creator=self.user_one, category='hypothesis')
self.project_two = ProjectFactory(title="Project Two", description="One Three", is_public=True, creator=self.user_one)
self.project_three = ProjectFactory(title="Three", is_public=True, creator=self.user_two)
self.private_project_user_one = ProjectFactory(title="Private Project User One",
is_public=False,
creator=self.user_one)
self.private_project_user_two = ProjectFactory(title="Private Project User Two",
is_public=False,
creator=self.user_two)
self.project_one.add_tag('tag1', Auth(self.project_one.creator), save=False)
self.project_one.add_tag('tag2', Auth(self.project_one.creator), save=False)
self.project_one.save()
self.project_two.add_tag('tag1', Auth(self.project_two.creator), save=True)
self.project_two.save()
self.project_one_reg = RegistrationFactory(creator=self.user_one, project=self.project_one, is_public=True)
self.project_two_reg = RegistrationFactory(creator=self.user_one, project=self.project_two, is_public=True)
self.project_three_reg = RegistrationFactory(creator=self.user_two, project=self.project_three, is_public=True)
self.private_project_user_one_reg = RegistrationFactory(creator=self.user_one, project=self.private_project_user_one, is_public=False)
self.private_project_user_two_reg = RegistrationFactory(creator=self.user_two, project=self.private_project_user_two, is_public=False)
self.folder = CollectionFactory()
self.bookmark_collection = find_bookmark_collection(self.user_one)
self.url = "/{}registrations/".format(API_BASE)
def test_filtering_by_category(self):
url = '/{}registrations/?filter[category]=hypothesis'.format(API_BASE)
res = self.app.get(url, auth=self.user_one.auth)
registration_json = res.json['data']
ids = [each['id'] for each in registration_json]
assert_in(self.project_one_reg._id, ids)
assert_not_in(self.project_two_reg._id, ids)
assert_not_in(self.project_three_reg._id, ids)
assert_not_in(self.private_project_user_one_reg._id, ids)
assert_not_in(self.private_project_user_two_reg._id, ids)
def test_filtering_by_public(self):
url = '/{}registrations/?filter[public]=false'.format(API_BASE)
res = self.app.get(url, auth=self.user_one.auth)
reg_json = res.json['data']
# No public projects returned
assert_false(
any([each['attributes']['public'] for each in reg_json])
)
ids = [each['id'] for each in reg_json]
assert_not_in(self.project_one_reg._id, ids)
assert_not_in(self.project_two_reg._id, ids)
url = '/{}registrations/?filter[public]=true'.format(API_BASE)
res = self.app.get(url, auth=self.user_one.auth)
reg_json = res.json['data']
# No private projects returned
assert_true(
all([each['attributes']['public'] for each in reg_json])
)
ids = [each['id'] for each in reg_json]
assert_in(self.project_one_reg._id, ids)
assert_in(self.project_two_reg._id, ids)
assert_in(self.project_three_reg._id, ids)
assert_not_in(self.private_project_user_one_reg._id, ids)
assert_not_in(self.private_project_user_two_reg._id, ids)
def test_filtering_tags(self):
# both project_one and project_two have tag1
url = '/{}registrations/?filter[tags]={}'.format(API_BASE, 'tag1')
res = self.app.get(url, auth=self.project_one.creator.auth)
reg_json = res.json['data']
ids = [each['id'] for each in reg_json]
assert_in(self.project_one_reg._id, ids)
assert_in(self.project_two_reg._id, ids)
assert_not_in(self.project_three_reg._id, ids)
assert_not_in(self.private_project_user_one_reg._id, ids)
assert_not_in(self.private_project_user_two_reg._id, ids)
# filtering two tags
# project_one has both tags; project_two only has one
url = '/{}registrations/?filter[tags]={}&filter[tags]={}'.format(API_BASE, 'tag1', 'tag2')
res = self.app.get(url, auth=self.project_one.creator.auth)
reg_json = res.json['data']
ids = [each['id'] for each in reg_json]
assert_in(self.project_one_reg._id, ids)
assert_not_in(self.project_two_reg._id, ids)
assert_not_in(self.project_three_reg._id, ids)
assert_not_in(self.private_project_user_one_reg._id, ids)
assert_not_in(self.private_project_user_two_reg._id, ids)
def test_filtering_tags_exact(self):
self.project_one.add_tag('cats', Auth(self.user_one))
self.project_two.add_tag('cats', Auth(self.user_one))
self.project_one.add_tag('cat', Auth(self.user_one))
self.project_one_reg = RegistrationFactory(creator=self.user_one, project=self.project_one, is_public=True)
self.project_two_reg = RegistrationFactory(creator=self.user_one, project=self.project_two, is_public=True)
res = self.app.get(
'/{}registrations/?filter[tags]=cat'.format(
API_BASE
),
auth=self.user_one.auth
)
assert_equal(len(res.json.get('data')), 1)
def test_filtering_tags_capitalized_query(self):
self.project_one.add_tag('cat', Auth(self.user_one))
self.project_one_reg = RegistrationFactory(creator=self.user_one, project=self.project_one, is_public=True)
res = self.app.get(
'/{}registrations/?filter[tags]=CAT'.format(
API_BASE
),
auth=self.user_one.auth
)
assert_equal(len(res.json.get('data')), 1)
def test_filtering_tags_capitalized_tag(self):
self.project_one.add_tag('CAT', Auth(self.user_one))
self.project_one_reg = RegistrationFactory(creator=self.user_one, project=self.project_one, is_public=True)
res = self.app.get(
'/{}registrations/?filter[tags]=cat'.format(
API_BASE
),
auth=self.user_one.auth
)
assert_equal(len(res.json.get('data')), 1)
def test_filtering_on_multiple_tags(self):
self.project_one.add_tag('cat', Auth(self.user_one))
self.project_one.add_tag('sand', Auth(self.user_one))
self.project_one_reg = RegistrationFactory(creator=self.user_one, project=self.project_one, is_public=True)
res = self.app.get(
'/{}registrations/?filter[tags]=cat&filter[tags]=sand'.format(
API_BASE
),
auth=self.user_one.auth
)
assert_equal(len(res.json.get('data')), 1)
def test_filtering_on_multiple_tags_must_match_both(self):
self.project_one.add_tag('cat', Auth(self.user_one))
self.project_one_reg = RegistrationFactory(creator=self.user_one, project=self.project_one, is_public=True)
res = self.app.get(
'/{}registrations/?filter[tags]=cat&filter[tags]=sand'.format(
API_BASE
),
auth=self.user_one.auth
)
assert_equal(len(res.json.get('data')), 0)
def test_filtering_tags_returns_distinct(self):
# regression test for returning multiple of the same file
self.project_one.add_tag('cat', Auth(self.user_one))
self.project_one.add_tag('cAt', Auth(self.user_one))
self.project_one.add_tag('caT', Auth(self.user_one))
self.project_one.add_tag('CAT', Auth(self.user_one))
self.project_one_reg = RegistrationFactory(creator=self.user_one, project=self.project_one, is_public=True)
res = self.app.get(
'/{}registrations/?filter[tags]=cat'.format(
API_BASE
),
auth=self.user_one.auth
)
assert_equal(len(res.json.get('data')), 1)
def test_filtering_contributors(self):
res = self.app.get(
'/{}registrations/?filter[contributors]={}'.format(
API_BASE, self.user_one._id
),
auth=self.user_one.auth
)
assert_equal(len(res.json.get('data')), 3)
def test_filtering_contributors_bad_id(self):
res = self.app.get(
'/{}registrations/?filter[contributors]=acatdresseduplikeahuman'.format(
API_BASE
),
auth=self.user_one.auth
)
assert_equal(len(res.json.get('data')), 0)
def test_get_all_registrations_with_no_filter_logged_in(self):
res = self.app.get(self.url, auth=self.user_one.auth)
reg_json = res.json['data']
ids = [each['id'] for each in reg_json]
assert_in(self.project_one_reg._id, ids)
assert_in(self.project_two_reg._id, ids)
assert_in(self.project_three_reg._id, ids)
assert_in(self.private_project_user_one_reg._id, ids)
assert_not_in(self.private_project_user_two_reg._id, ids)
assert_not_in(self.project_one._id, ids)
assert_not_in(self.project_two._id, ids)
assert_not_in(self.project_three._id, ids)
assert_not_in(self.private_project_user_one._id, ids)
assert_not_in(self.private_project_user_two._id, ids)
assert_not_in(self.folder._id, ids)
assert_not_in(self.bookmark_collection._id, ids)
def test_get_all_registrations_with_no_filter_not_logged_in(self):
res = self.app.get(self.url)
reg_json = res.json['data']
ids = [each['id'] for each in reg_json]
assert_in(self.project_one_reg._id, ids)
assert_in(self.project_two_reg._id, ids)
assert_in(self.project_three_reg._id, ids)
assert_not_in(self.private_project_user_one_reg._id, ids)
assert_not_in(self.private_project_user_two_reg._id, ids)
assert_not_in(self.project_one._id, ids)
assert_not_in(self.project_two._id, ids)
assert_not_in(self.project_three._id, ids)
assert_not_in(self.private_project_user_one._id, ids)
assert_not_in(self.private_project_user_two._id, ids)
assert_not_in(self.folder._id, ids)
assert_not_in(self.bookmark_collection._id, ids)
def test_get_one_registration_with_exact_filter_logged_in(self):
url = "/{}registrations/?filter[title]=Project%20One".format(API_BASE)
res = self.app.get(url, auth=self.user_one.auth)
reg_json = res.json['data']
ids = [each['id'] for each in reg_json]
assert_in(self.project_one_reg._id, ids)
assert_not_in(self.project_two_reg._id, ids)
assert_not_in(self.project_three_reg._id, ids)
assert_not_in(self.private_project_user_one_reg._id, ids)
assert_not_in(self.private_project_user_two_reg._id, ids)
assert_not_in(self.folder._id, ids)
assert_not_in(self.bookmark_collection._id, ids)
def test_get_one_registration_with_exact_filter_not_logged_in(self):
url = "/{}registrations/?filter[title]=Private%20Project%20User%20One".format(API_BASE)
res = self.app.get(url)
reg_json = res.json['data']
ids = [each['id'] for each in reg_json]
assert_not_in(self.project_one_reg._id, ids)
assert_not_in(self.project_two_reg._id, ids)
assert_not_in(self.project_three_reg._id, ids)
assert_not_in(self.private_project_user_one_reg._id, ids)
assert_not_in(self.private_project_user_two_reg._id, ids)
assert_not_in(self.folder._id, ids)
assert_not_in(self.bookmark_collection._id, ids)
def test_get_some_registrations_with_substring_logged_in(self):
url = "/{}registrations/?filter[title]=Two".format(API_BASE)
res = self.app.get(url, auth=self.user_one.auth)
reg_json = res.json['data']
ids = [each['id'] for each in reg_json]
assert_not_in(self.project_one_reg._id, ids)
assert_in(self.project_two_reg._id, ids)
assert_not_in(self.project_three_reg._id, ids)
assert_not_in(self.private_project_user_one_reg._id, ids)
assert_not_in(self.private_project_user_two_reg._id, ids)
assert_not_in(self.folder._id, ids)
assert_not_in(self.bookmark_collection._id, ids)
def test_get_some_registrations_with_substring_not_logged_in(self):
url = "/{}registrations/?filter[title]=One".format(API_BASE)
res = self.app.get(url)
reg_json = res.json['data']
ids = [each['id'] for each in reg_json]
assert_in(self.project_one_reg._id, ids)
assert_not_in(self.project_two_reg._id, ids)
assert_not_in(self.project_three_reg._id, ids)
assert_not_in(self.private_project_user_one_reg._id, ids)
assert_not_in(self.private_project_user_two_reg._id, ids)
assert_not_in(self.folder._id, ids)
assert_not_in(self.bookmark_collection._id, ids)
def test_get_only_public_or_my_registrations_with_filter_logged_in(self):
url = "/{}registrations/?filter[title]=Project".format(API_BASE)
res = self.app.get(url, auth=self.user_one.auth)
reg_json = res.json['data']
ids = [each['id'] for each in reg_json]
assert_in(self.project_one_reg._id, ids)
assert_in(self.project_two_reg._id, ids)
assert_not_in(self.project_three_reg._id, ids)
assert_in(self.private_project_user_one_reg._id, ids)
assert_not_in(self.private_project_user_two_reg._id, ids)
assert_not_in(self.folder._id, ids)
assert_not_in(self.bookmark_collection._id, ids)
def test_get_only_public_registrations_with_filter_not_logged_in(self):
url = "/{}registrations/?filter[title]=Project".format(API_BASE)
res = self.app.get(url)
reg_json = res.json['data']
ids = [each['id'] for each in reg_json]
assert_in(self.project_one_reg._id, ids)
assert_in(self.project_two_reg._id, ids)
assert_not_in(self.project_three_reg._id, ids)
assert_not_in(self.private_project_user_one_reg._id, ids)
assert_not_in(self.private_project_user_two_reg._id, ids)
assert_not_in(self.folder._id, ids)
assert_not_in(self.bookmark_collection._id, ids)
def test_alternate_filtering_field_logged_in(self):
url = "/{}registrations/?filter[description]=Three".format(API_BASE)
res = self.app.get(url, auth=self.user_one.auth)
reg_json = res.json['data']
ids = [each['id'] for each in reg_json]
assert_not_in(self.project_one_reg._id, ids)
assert_in(self.project_two_reg._id, ids)
assert_not_in(self.project_three_reg._id, ids)
assert_not_in(self.private_project_user_one_reg._id, ids)
assert_not_in(self.private_project_user_two_reg._id, ids)
assert_not_in(self.folder._id, ids)
assert_not_in(self.bookmark_collection._id, ids)
def test_alternate_filtering_field_not_logged_in(self):
url = "/{}registrations/?filter[description]=Two".format(API_BASE)
res = self.app.get(url)
reg_json = res.json['data']
ids = [each['id'] for each in reg_json]
assert_in(self.project_one_reg._id, ids)
assert_not_in(self.project_two_reg._id, ids)
assert_not_in(self.project_three_reg._id, ids)
assert_not_in(self.private_project_user_one_reg._id, ids)
assert_not_in(self.private_project_user_two_reg._id, ids)
assert_not_in(self.folder._id, ids)
assert_not_in(self.bookmark_collection._id, ids)
def test_incorrect_filtering_field_not_logged_in(self):
url = '/{}registrations/?filter[notafield]=bogus'.format(API_BASE)
res = self.app.get(url, expect_errors=True)
assert_equal(res.status_code, 400)
errors = res.json['errors']
assert_equal(len(errors), 1)
assert_equal(errors[0]['detail'], "'notafield' is not a valid field for this endpoint.")
class TestRegistrationCreate(DraftRegistrationTestCase):
@pytest.fixture()
def schema(self):
return MetaSchema.objects.get(name='Replication Recipe (Brandt et al., 2013): Post-Completion', schema_version=LATEST_SCHEMA_VERSION)
@pytest.fixture()
def draft_registration(self, user, project_public, schema):
return DraftRegistrationFactory(
initiator=user,
registration_schema=schema,
branched_from=project_public,
            registration_metadata={
'item29': {'value': 'Yes'},
'item33': {'value': 'success'}
}
)
@pytest.fixture()
def url_registrations(self, project_public):
return '/{}nodes/{}/registrations/'.format(API_BASE, project_public._id)
@pytest.fixture()
def payload(self, draft_registration):
return {
'data': {
'type': 'registrations',
'attributes': {
'draft_registration': draft_registration._id,
'registration_choice': 'immediate'
}
}
}
@mock.patch('framework.celery_tasks.handlers.enqueue_task')
def test_admin_can_create_registration(self, mock_enqueue, app, user, payload, url_registrations):
res = app.post_json_api(url_registrations, payload, auth=user.auth)
data = res.json['data']['attributes']
assert res.status_code == 201
assert data['registration'] is True
assert data['pending_registration_approval'] is True
assert data['public'] is False
def test_cannot_create_registration(self, app, user_write_contrib, user_read_contrib, payload, url_registrations):
# def test_write_only_contributor_cannot_create_registration(self):
res = app.post_json_api(url_registrations, payload, auth=user_write_contrib.auth, expect_errors=True)
assert res.status_code == 403
# def test_read_only_contributor_cannot_create_registration(self):
res = app.post_json_api(url_registrations, payload, auth=user_read_contrib.auth, expect_errors=True)
assert res.status_code == 403
# def test_non_authenticated_user_cannot_create_registration(self):
res = app.post_json_api(url_registrations, payload, expect_errors=True)
assert res.status_code == 401
@mock.patch('framework.celery_tasks.handlers.enqueue_task')
def test_registration_draft_must_be_specified(self, mock_enqueue, app, user, url_registrations):
payload = {
'data': {
'type': 'registrations',
'attributes': {
'registration_choice': 'immediate'
}
}
}
res = app.post_json_api(url_registrations, payload, auth=user.auth, expect_errors=True)
assert res.status_code == 400
assert res.json['errors'][0]['source']['pointer'] == '/data/attributes/draft_registration'
assert res.json['errors'][0]['detail'] == 'This field is required.'
@mock.patch('framework.celery_tasks.handlers.enqueue_task')
def test_registration_draft_must_be_valid(self, mock_enqueue, app, user, url_registrations):
payload = {
'data': {
'type': 'registrations',
'attributes': {
'registration_choice': 'immediate',
'draft_registration': '12345'
}
}
}
res = app.post_json_api(url_registrations, payload, auth=user.auth, expect_errors=True)
assert res.status_code == 404
@mock.patch('framework.celery_tasks.handlers.enqueue_task')
def test_registration_draft_must_be_draft_of_current_node(self, mock_enqueue, app, user, schema, url_registrations):
project_new = ProjectFactory(creator=user)
draft_registration = DraftRegistrationFactory(
initiator=user,
registration_schema=schema,
branched_from=project_new,
            registration_metadata={
'item29': {'value': 'Yes'},
'item33': {'value': 'success'}
}
)
payload = {
'data': {
'type': 'registrations',
'attributes': {
'registration_choice': 'immediate',
'draft_registration': draft_registration._id
}
}
}
res = app.post_json_api(url_registrations, payload, auth=user.auth, expect_errors=True)
assert res.status_code == 400
assert res.json['errors'][0]['detail'] == 'This draft registration is not created from the given node.'
@mock.patch('framework.celery_tasks.handlers.enqueue_task')
def test_required_top_level_questions_must_be_answered_on_draft(self, mock_enqueue, app, user, project_public, prereg_metadata, url_registrations):
prereg_schema = MetaSchema.objects.get(name='Prereg Challenge', schema_version=LATEST_SCHEMA_VERSION)
prereg_draft_registration = DraftRegistrationFactory(
initiator=user,
registration_schema=prereg_schema,
branched_from=project_public
)
registration_metadata = prereg_metadata(prereg_draft_registration)
del registration_metadata['q1']
prereg_draft_registration.registration_metadata = registration_metadata
prereg_draft_registration.save()
payload = {
'data': {
'type': 'registrations',
'attributes': {
'registration_choice': 'immediate',
'draft_registration': prereg_draft_registration._id,
}
}
}
res = app.post_json_api(url_registrations, payload, auth=user.auth, expect_errors=True)
assert res.status_code == 400
assert res.json['errors'][0]['detail'] == 'u\'q1\' is a required property'
@mock.patch('framework.celery_tasks.handlers.enqueue_task')
def test_required_second_level_questions_must_be_answered_on_draft(self, mock_enqueue, app, user, project_public, prereg_metadata, url_registrations):
prereg_schema = MetaSchema.objects.get(name='Prereg Challenge', schema_version=LATEST_SCHEMA_VERSION)
prereg_draft_registration = DraftRegistrationFactory(
initiator=user,
registration_schema=prereg_schema,
branched_from=project_public
)
registration_metadata = prereg_metadata(prereg_draft_registration)
registration_metadata['q11'] = {'value': {}}
prereg_draft_registration.registration_metadata = registration_metadata
prereg_draft_registration.save()
payload = {
'data': {
'type': 'registrations',
'attributes': {
'registration_choice': 'immediate',
'draft_registration': prereg_draft_registration._id,
}
}
}
res = app.post_json_api(url_registrations, payload, auth=user.auth, expect_errors=True)
assert res.status_code == 400
assert res.json['errors'][0]['detail'] == 'u\'question\' is a required property'
@mock.patch('framework.celery_tasks.handlers.enqueue_task')
def test_required_third_level_questions_must_be_answered_on_draft(self, mock_enqueue, app, user, project_public, prereg_metadata, url_registrations):
prereg_schema = MetaSchema.objects.get(name='Prereg Challenge', schema_version=LATEST_SCHEMA_VERSION)
prereg_draft_registration = DraftRegistrationFactory(
initiator=user,
registration_schema=prereg_schema,
branched_from=project_public
)
registration_metadata = prereg_metadata(prereg_draft_registration)
registration_metadata['q11'] = {'value': {"question": {}}}
prereg_draft_registration.registration_metadata = registration_metadata
prereg_draft_registration.save()
payload = {
'data': {
'type': 'registrations',
'attributes': {
'registration_choice': 'immediate',
'draft_registration': prereg_draft_registration._id,
}
}
}
res = app.post_json_api(url_registrations, payload, auth=user.auth, expect_errors=True)
assert res.status_code == 400
assert res.json['errors'][0]['detail'] == '\'value\' is a required property'
@mock.patch('framework.celery_tasks.handlers.enqueue_task')
def test_multiple_choice_in_registration_schema_must_match_one_of_choices(self, mock_enqueue, app, user, project_public, schema, payload, url_registrations):
draft_registration = DraftRegistrationFactory(
initiator=user,
registration_schema=schema,
branched_from=project_public,
            registration_metadata={
'item29': {'value': 'Yes'},
'item33': {'value': 'success!'}
}
)
payload['data']['attributes']['draft_registration'] = draft_registration._id
res = app.post_json_api(url_registrations, payload, auth=user.auth, expect_errors=True)
assert res.status_code == 400
assert (res.json['errors'][0]['detail'] == 'u\'success!\' is not one of [u\'success\', u\'informative failure to replicate\','
' u\'practical failure to replicate\', u\'inconclusive\']')
def test_invalid_registration_choice(self, app, user, draft_registration, payload, url_registrations):
payload = {
'data': {
'type': 'registrations',
'attributes': {
'draft_registration': draft_registration._id,
'registration_choice': 'tomorrow'
}
}
}
res = app.post_json_api(url_registrations, payload, auth=user.auth, expect_errors=True)
assert res.status_code == 400
assert res.json['errors'][0]['source']['pointer'] == '/data/attributes/registration_choice'
assert res.json['errors'][0]['detail'] == '"tomorrow" is not a valid choice.'
@mock.patch('framework.celery_tasks.handlers.enqueue_task')
def test_embargo_end_date_provided_if_registration_choice_is_embargo(self, mock_enqueue, app, user, draft_registration, url_registrations):
payload = {
'data': {
'type': 'registrations',
'attributes': {
'draft_registration': draft_registration._id,
'registration_choice': 'embargo'
}
}
}
res = app.post_json_api(url_registrations, payload, auth=user.auth, expect_errors=True)
assert res.status_code == 400
assert res.json['errors'][0]['detail'] == 'lift_embargo must be specified.'
@mock.patch('framework.celery_tasks.handlers.enqueue_task')
def test_embargo_must_be_less_than_four_years(self, mock_enqueue, app, user, draft_registration, url_registrations):
today = timezone.now()
five_years = (today + dateutil.relativedelta.relativedelta(years=5)).strftime('%Y-%m-%dT%H:%M:%S')
payload = {
'data': {
'type': 'registrations',
'attributes': {
'draft_registration': draft_registration._id,
'registration_choice': 'embargo',
'lift_embargo': five_years
}
}
}
res = app.post_json_api(url_registrations, payload, auth=user.auth, expect_errors=True)
assert res.status_code == 400
assert res.json['errors'][0]['detail'] == 'Registrations can only be embargoed for up to four years.'
@mock.patch('framework.celery_tasks.handlers.enqueue_task')
def test_embargo_registration(self, mock_enqueue, app, user, draft_registration, url_registrations):
today = timezone.now()
        next_month = (today + dateutil.relativedelta.relativedelta(months=1)).strftime('%Y-%m-%dT%H:%M:%S')
payload = {
'data': {
'type': 'registrations',
'attributes': {
'draft_registration': draft_registration._id,
'registration_choice': 'embargo',
                    'lift_embargo': next_month
}
}
}
res = app.post_json_api(url_registrations, payload, auth=user.auth, expect_errors=True)
assert res.status_code == 201
data = res.json['data']['attributes']
assert data['registration'] is True
assert data['pending_embargo_approval'] is True
def test_embargo_end_date_must_be_in_the_future(self, app, user, draft_registration, url_registrations):
today = timezone.now().strftime('%Y-%m-%dT%H:%M:%S')
payload = {
'data': {
'type': 'registrations',
'attributes': {
'draft_registration': draft_registration._id,
'registration_choice': 'embargo',
'lift_embargo': today
}
}
}
res = app.post_json_api(url_registrations, payload, auth=user.auth, expect_errors=True)
assert res.status_code == 400
assert res.json['errors'][0]['detail'] == 'Embargo end date must be at least three days in the future.'
def test_invalid_embargo_end_date_format(self, app, user, draft_registration, url_registrations):
today = timezone.now().isoformat()
payload = {
'data': {
'type': 'registrations',
'attributes': {
'draft_registration': draft_registration._id,
'registration_choice': 'embargo',
'lift_embargo': today
}
}
}
res = app.post_json_api(url_registrations, payload, auth=user.auth, expect_errors=True)
assert res.status_code == 400
assert res.json['errors'][0]['detail'] == 'Datetime has wrong format. Use one of these formats instead: YYYY-MM-DDThh:mm:ss.'
@mock.patch('framework.celery_tasks.handlers.enqueue_task')
def test_cannot_register_draft_that_has_already_been_registered(self, mock_enqueue, app, user, payload, draft_registration, url_registrations):
draft_registration.register(auth=Auth(user), save=True)
res = app.post_json_api(url_registrations, payload, auth=user.auth, expect_errors=True)
assert res.status_code == 403
assert res.json['errors'][0]['detail'] == 'This draft has already been registered and cannot be modified.'
@mock.patch('framework.celery_tasks.handlers.enqueue_task')
def test_cannot_register_draft_that_is_pending_review(self, mock_enqueue, app, user, payload, url_registrations):
with mock.patch.object(DraftRegistration, 'is_pending_review', mock.PropertyMock(return_value=True)):
res = app.post_json_api(url_registrations, payload, auth=user.auth, expect_errors=True)
assert res.status_code == 403
assert res.json['errors'][0]['detail'] == 'This draft is pending review and cannot be modified.'
def test_cannot_register_draft_that_has_already_been_approved(self, app, user, payload, url_registrations):
with mock.patch.object(DraftRegistration, 'requires_approval', mock.PropertyMock(return_value=True)), mock.patch.object(DraftRegistration, 'is_approved', mock.PropertyMock(return_value=True)):
res = app.post_json_api(url_registrations, payload, auth=user.auth, expect_errors=True)
assert res.status_code == 403
assert res.json['errors'][0]['detail'] == 'This draft has already been approved and cannot be modified.'
@pytest.mark.django_db
class TestRegistrationBulkUpdate:
@pytest.fixture()
def url(self):
return '/{}registrations/'.format(API_BASE)
@pytest.fixture()
def user(self):
return AuthUserFactory()
@pytest.fixture()
def registration_one(self, user):
return RegistrationFactory(creator=user, title='Birds', embargo=EmbargoFactory(user=user), is_public=False)
@pytest.fixture()
def registration_two(self, user):
return RegistrationFactory(creator=user, title='Birds II', embargo=EmbargoFactory(user=user), is_public=False)
@pytest.fixture()
def private_payload(self, registration_one, registration_two):
return {
'data': [
{
'id': registration_one._id,
'type': 'registrations',
'attributes': {
'public': False
}
},
{
'id': registration_two._id,
'type': 'registrations',
'attributes': {
'public': False
}
}
]
}
@pytest.fixture()
def public_payload(self, registration_one, registration_two):
return {
'data': [
{
'id': registration_one._id,
'type': 'registrations',
'attributes': {
'public': True
}
},
{
'id': registration_two._id,
'type': 'registrations',
'attributes': {
'public': True
}
}
]
}
@pytest.fixture()
def empty_payload(self, registration_one, registration_two):
return {
'data': [
{
'id': registration_one._id,
'type': 'registrations',
'attributes': {}
},
{
'id': registration_two._id,
'type': 'registrations',
'attributes': {}
}
]
}
@pytest.fixture()
def bad_payload(self, registration_one, registration_two):
return {
'data': [
{
'id': registration_one._id,
'type': 'registrations',
'attributes': {
'public': True,
}
},
{
'id': registration_two._id,
'type': 'registrations',
'attributes': {
'title': 'Nerds II: Attack of the Nerds',
}
}
]
}
def test_bulk_update_errors(self, app, user, registration_one, registration_two, public_payload, private_payload, empty_payload, bad_payload, url):
# test_bulk_update_registrations_blank_request
res = app.put_json_api(url, auth=user.auth, expect_errors=True, bulk=True)
assert res.status_code == 400
# test_bulk_update_registrations_one_not_found
payload = {'data': [
{
'id': '12345',
'type': 'registrations',
'attributes': {
'public': True,
}
}, public_payload['data'][0]
]}
res = app.put_json_api(url, payload, auth=user.auth, expect_errors=True, bulk=True)
assert res.status_code == 400
assert res.json['errors'][0]['detail'] == 'Could not find all objects to update.'
# test_bulk_update_registrations_logged_out
res = app.put_json_api(url, public_payload, expect_errors=True, bulk=True)
assert res.status_code == 401
assert res.json['errors'][0]['detail'] == exceptions.NotAuthenticated.default_detail
# test_bulk_update_registrations_logged_in_non_contrib
non_contrib = AuthUserFactory()
res = app.put_json_api(url, private_payload, auth=non_contrib.auth, expect_errors=True, bulk=True)
assert res.status_code == 403
assert res.json['errors'][0]['detail'] == exceptions.PermissionDenied.default_detail
# test_bulk_update_registrations_send_dictionary_not_list
res = app.put_json_api(url, {'data': {'id': registration_one._id, 'type': 'nodes',
'attributes': {'public': True}}},
auth=user.auth, expect_errors=True, bulk=True)
assert res.status_code == 400
assert res.json['errors'][0]['detail'] == 'Expected a list of items but got type "dict".'
# test_bulk_update_id_not_supplied
res = app.put_json_api(url, {'data': [public_payload['data'][1], {'type': 'registrations', 'attributes':
{'public': True}}]}, auth=user.auth, expect_errors=True, bulk=True)
assert res.status_code == 400
assert len(res.json['errors']) == 1
assert res.json['errors'][0]['source']['pointer'] == '/data/1/id'
assert res.json['errors'][0]['detail'] == "This field may not be null."
# test_bulk_update_type_not_supplied
res = app.put_json_api(url, {'data': [public_payload['data'][1], {'id': registration_one._id, 'attributes':
{'public': True}}]}, auth=user.auth, expect_errors=True, bulk=True)
assert res.status_code == 400
assert len(res.json['errors']) == 1
assert res.json['errors'][0]['source']['pointer'] == '/data/1/type'
assert res.json['errors'][0]['detail'] == "This field may not be null."
# test_bulk_update_incorrect_type
res = app.put_json_api(url, {'data': [public_payload['data'][1], {'id': registration_one._id, 'type': 'Incorrect', 'attributes':
{'public': True}}]}, auth=user.auth, expect_errors=True, bulk=True)
assert res.status_code == 409
# test_bulk_update_limits
registration_update_list = {'data': [public_payload['data'][0]] * 101}
res = app.put_json_api(url, registration_update_list, auth=user.auth, expect_errors=True, bulk=True)
assert res.json['errors'][0]['detail'] == 'Bulk operation limit is 100, got 101.'
assert res.json['errors'][0]['source']['pointer'] == '/data'
# 400 from attempting to make a registration private
res = app.put_json_api(url, private_payload, auth=user.auth, bulk=True, expect_errors=True)
assert res.status_code == 400
assert res.json['errors'][0]['detail'] == 'Registrations can only be turned from private to public.'
        # Confirm no changes have occurred
registration_one.refresh_from_db()
registration_two.refresh_from_db()
assert registration_one.embargo_termination_approval is None
assert registration_two.embargo_termination_approval is None
assert registration_one.is_public is False
assert registration_two.is_public is False
assert registration_one.title == 'Birds'
assert registration_two.title == 'Birds II'
def test_bulk_update_embargo_logged_in_read_only_contrib(self, app, user, registration_one, registration_two, public_payload, url):
read_contrib = AuthUserFactory()
registration_one.add_contributor(read_contrib, permissions=[permissions.READ], save=True)
registration_two.add_contributor(read_contrib, permissions=[permissions.READ], save=True)
res = app.put_json_api(url, public_payload, auth=read_contrib.auth, expect_errors=True, bulk=True)
assert res.status_code == 403
assert res.json['errors'][0]['detail'] == exceptions.PermissionDenied.default_detail
def test_bulk_update_embargo_logged_in_contrib(self, app, user, registration_one, registration_two, public_payload, url):
assert registration_one.embargo_termination_approval is None
assert registration_two.embargo_termination_approval is None
res = app.put_json_api(url, public_payload, auth=user.auth, bulk=True)
assert res.status_code == 200
assert ({registration_one._id, registration_two._id} == {res.json['data'][0]['id'], res.json['data'][1]['id']})
# Needs confirmation before it will become public
assert res.json['data'][0]['attributes']['public'] is False
assert res.json['data'][1]['attributes']['public'] is False
registration_one.refresh_from_db()
registration_two.refresh_from_db()
# registrations should have pending terminations
assert registration_one.embargo_termination_approval and registration_one.embargo_termination_approval.is_pending_approval
assert registration_two.embargo_termination_approval and registration_two.embargo_termination_approval.is_pending_approval
class TestRegistrationListFiltering(RegistrationListFilteringMixin, ApiTestCase):
url = '/{}registrations/?'.format(API_BASE)
|
{
"content_hash": "8f54613c6f5194b1e3b7634a2b29f70a",
"timestamp": "",
"source": "github",
"line_count": 1058,
"max_line_length": 200,
"avg_line_length": 44.71266540642722,
"alnum_prop": 0.6057159768316915,
"repo_name": "TomBaxter/osf.io",
"id": "c37e838d4925fd1f642ccb5c5d1faea74dcf9b13",
"size": "47306",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "api_tests/registrations/views/test_registration_list.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "110148"
},
{
"name": "HTML",
"bytes": "225000"
},
{
"name": "JavaScript",
"bytes": "1807478"
},
{
"name": "Mako",
"bytes": "642588"
},
{
"name": "Python",
"bytes": "7561502"
},
{
"name": "VCL",
"bytes": "13885"
}
],
"symlink_target": ""
}
|
'''
This contains functions to run the Trend Filtering Algorithm (TFA) in a
parallelized manner on large collections of light curves.
'''
#############
## LOGGING ##
#############
import logging
from astrobase import log_sub, log_fmt, log_date_fmt
DEBUG = False
if DEBUG:
level = logging.DEBUG
else:
level = logging.INFO
LOGGER = logging.getLogger(__name__)
logging.basicConfig(
level=level,
style=log_sub,
format=log_fmt,
datefmt=log_date_fmt,
)
LOGDEBUG = LOGGER.debug
LOGINFO = LOGGER.info
LOGWARNING = LOGGER.warning
LOGERROR = LOGGER.error
LOGEXCEPTION = LOGGER.exception
#############
## IMPORTS ##
#############
import pickle
import os
import os.path
import glob
import multiprocessing as mp
import gzip
from tornado.escape import squeeze
import numpy as np
import numpy.random as npr
npr.seed(0xc0ffee)
import scipy.interpolate as spi
from scipy import linalg as spla
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
# to turn a list of keys into a dict address
# from https://stackoverflow.com/a/14692747
from functools import reduce
from operator import getitem
def _dict_get(datadict, keylist):
return reduce(getitem, keylist, datadict)
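# example: _dict_get(lcdict, ['objectinfo', 'ra']) == lcdict['objectinfo']['ra'];
# this is how dotted column keys such as 'objectinfo.ra' are dereferenced after
# being split on '.' by the worker functions below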
############
## CONFIG ##
############
NCPUS = mp.cpu_count()
###################
## LOCAL IMPORTS ##
###################
from astrobase import coordutils
from astrobase.varclass import starfeatures, varfeatures
from astrobase.lcmath import (
normalize_magseries,
sigclip_magseries
)
from astrobase.lcproc import get_lcformat
##################################
## LIGHT CURVE DETRENDING - TFA ##
##################################
def _collect_tfa_stats(task):
'''
This is a parallel worker to gather LC stats.
task[0] = lcfile
task[1] = lcformat
task[2] = lcformatdir
task[3] = timecols
task[4] = magcols
task[5] = errcols
task[6] = custom_bandpasses
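    An example task tuple (the path is hypothetical; the None entries fall
    back to the registered format's defaults, and the last None means no
    custom bandpasses):
        ('/data/lcs/object-0001-hatlc.sqlite.gz', 'hat-sql', None,
         None, None, None, None)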
'''
try:
(lcfile, lcformat, lcformatdir,
timecols, magcols, errcols,
custom_bandpasses) = task
try:
formatinfo = get_lcformat(lcformat,
use_lcformat_dir=lcformatdir)
if formatinfo:
(dfileglob, readerfunc,
dtimecols, dmagcols, derrcols,
magsarefluxes, normfunc) = formatinfo
else:
LOGERROR("can't figure out the light curve format")
return None
except Exception:
LOGEXCEPTION("can't figure out the light curve format")
return None
# override the default timecols, magcols, and errcols
# using the ones provided to the function
if timecols is None:
timecols = dtimecols
if magcols is None:
magcols = dmagcols
if errcols is None:
errcols = derrcols
# get the LC into a dict
lcdict = readerfunc(lcfile)
# this should handle lists/tuples being returned by readerfunc
# we assume that the first element is the actual lcdict
# FIXME: figure out how to not need this assumption
if ( (isinstance(lcdict, (list, tuple))) and
(isinstance(lcdict[0], dict)) ):
lcdict = lcdict[0]
#
# collect the necessary stats for this light curve
#
# 1. number of observations
# 2. median mag
# 3. eta_normal
# 4. MAD
# 5. objectid
# 6. get mags and colors from objectinfo if there's one in lcdict
if 'objectid' in lcdict:
objectid = lcdict['objectid']
elif 'objectinfo' in lcdict and 'objectid' in lcdict['objectinfo']:
objectid = lcdict['objectinfo']['objectid']
elif 'objectinfo' in lcdict and 'hatid' in lcdict['objectinfo']:
objectid = lcdict['objectinfo']['hatid']
else:
LOGERROR('no objectid present in lcdict for LC %s, '
'using filename prefix as objectid' % lcfile)
objectid = os.path.splitext(os.path.basename(lcfile))[0]
if 'objectinfo' in lcdict:
colorfeat = starfeatures.color_features(
lcdict['objectinfo'],
deredden=False,
custom_bandpasses=custom_bandpasses
)
else:
LOGERROR('no objectinfo dict in lcdict, '
'could not get magnitudes for LC %s, '
'cannot use for TFA template ensemble' %
lcfile)
return None
# this is the initial dict
resultdict = {'objectid':objectid,
'ra':lcdict['objectinfo']['ra'],
'decl':lcdict['objectinfo']['decl'],
'colorfeat':colorfeat,
'lcfpath':os.path.abspath(lcfile),
'lcformat':lcformat,
'lcformatdir':lcformatdir,
'timecols':timecols,
'magcols':magcols,
'errcols':errcols}
for tcol, mcol, ecol in zip(timecols, magcols, errcols):
try:
# dereference the columns and get them from the lcdict
if '.' in tcol:
tcolget = tcol.split('.')
else:
tcolget = [tcol]
times = _dict_get(lcdict, tcolget)
if '.' in mcol:
mcolget = mcol.split('.')
else:
mcolget = [mcol]
mags = _dict_get(lcdict, mcolget)
if '.' in ecol:
ecolget = ecol.split('.')
else:
ecolget = [ecol]
errs = _dict_get(lcdict, ecolget)
# normalize here if not using special normalization
if normfunc is None:
ntimes, nmags = normalize_magseries(
times, mags,
magsarefluxes=magsarefluxes
)
times, mags, errs = ntimes, nmags, errs
# get the variability features for this object
varfeat = varfeatures.all_nonperiodic_features(
times, mags, errs
)
resultdict[mcol] = varfeat
except Exception:
LOGEXCEPTION('%s, magcol: %s, probably ran into all-nans' %
(lcfile, mcol))
resultdict[mcol] = {'ndet':0,
'mad':np.nan,
'eta_normal':np.nan}
return resultdict
except Exception:
        LOGEXCEPTION('could not execute _collect_tfa_stats for task: %s' %
repr(task))
return None
def _reform_templatelc_for_tfa(task):
'''
This is a parallel worker that reforms light curves for TFA.
task[0] = lcfile
task[1] = lcformat
task[2] = lcformatdir
task[3] = timecol
task[4] = magcol
task[5] = errcol
task[6] = timebase
task[7] = interpolate_type
task[8] = sigclip
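    An example task tuple (all values hypothetical; timebase would be the
    numpy array of times that the template LCs are interpolated onto):
        ('/data/lcs/object-0001-hatlc.sqlite.gz', 'hat-sql', None,
         'rjd', 'aep_000', 'aie_000', timebase, 'nearest', 5.0)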
'''
try:
(lcfile, lcformat, lcformatdir,
tcol, mcol, ecol,
timebase, interpolate_type, sigclip) = task
try:
formatinfo = get_lcformat(lcformat,
use_lcformat_dir=lcformatdir)
if formatinfo:
(dfileglob, readerfunc,
dtimecols, dmagcols, derrcols,
magsarefluxes, normfunc) = formatinfo
else:
LOGERROR("can't figure out the light curve format")
return None
except Exception:
LOGEXCEPTION("can't figure out the light curve format")
return None
# get the LC into a dict
lcdict = readerfunc(lcfile)
# this should handle lists/tuples being returned by readerfunc
# we assume that the first element is the actual lcdict
# FIXME: figure out how to not need this assumption
if ( (isinstance(lcdict, (list, tuple))) and
(isinstance(lcdict[0], dict)) ):
lcdict = lcdict[0]
outdict = {}
# dereference the columns and get them from the lcdict
if '.' in tcol:
tcolget = tcol.split('.')
else:
tcolget = [tcol]
times = _dict_get(lcdict, tcolget)
if '.' in mcol:
mcolget = mcol.split('.')
else:
mcolget = [mcol]
mags = _dict_get(lcdict, mcolget)
if '.' in ecol:
ecolget = ecol.split('.')
else:
ecolget = [ecol]
errs = _dict_get(lcdict, ecolget)
# normalize here if not using special normalization
if normfunc is None:
ntimes, nmags = normalize_magseries(
times, mags,
magsarefluxes=magsarefluxes
)
times, mags, errs = ntimes, nmags, errs
#
# now we'll do: 1. sigclip, 2. reform to timebase, 3. renorm to zero
#
# 1. sigclip as requested
stimes, smags, serrs = sigclip_magseries(times,
mags,
errs,
sigclip=sigclip)
        # 2. now, we'll reform to the timebase by interpolation
mags_interpolator = spi.interp1d(stimes, smags,
kind=interpolate_type,
fill_value='extrapolate')
errs_interpolator = spi.interp1d(stimes, serrs,
kind=interpolate_type,
fill_value='extrapolate')
interpolated_mags = mags_interpolator(timebase)
interpolated_errs = errs_interpolator(timebase)
# 3. renorm to zero
magmedian = np.median(interpolated_mags)
renormed_mags = interpolated_mags - magmedian
# update the dict
outdict = {'mags':renormed_mags,
'errs':interpolated_errs,
'origmags':interpolated_mags}
#
# done with this magcol
#
return outdict
except Exception:
LOGEXCEPTION('reform LC task failed: %s' % repr(task))
return None
def tfa_templates_lclist(
lclist,
outfile,
lcinfo_pkl=None,
target_template_frac=0.1,
max_target_frac_obs=0.25,
min_template_number=10,
max_template_number=1000,
max_rms=0.15,
max_mult_above_magmad=1.5,
max_mult_above_mageta=1.5,
mag_bandpass='sdssr',
custom_bandpasses=None,
mag_bright_limit=10.0,
mag_faint_limit=12.0,
process_template_lcs=True,
template_sigclip=5.0,
template_interpolate='nearest',
lcformat='hat-sql',
lcformatdir=None,
timecols=None,
magcols=None,
errcols=None,
nworkers=NCPUS,
maxworkertasks=1000,
):
'''This selects template objects for TFA.
Selection criteria for TFA template ensemble objects:
- not variable: use a poly fit to the mag-MAD relation and eta-normal
variability index to get nonvar objects
- not more than 10% of the total number of objects in the field or
      `max_template_number` at most and no more than `max_target_frac_obs x
template_ndet` objects.
- allow shuffling of the templates if the target ends up in them
- nothing with less than the median number of observations in the field
- sigma-clip the input time series observations
- TODO: select randomly in xi-eta space. This doesn't seem to make a huge
difference at the moment, so removed those bits for now. This function
makes plots of xi-eta for the selected template objects so the
distributions can be visualized.
    This also determines the effective cadence that all TFA LCs will be binned
    to, since the template LC with the largest number of non-nan observations
    will be used as the timebase. All template LCs will be renormed to zero.
Parameters
----------
lclist : list of str
This is a list of light curves to use as input to generate the template
set.
outfile : str
        This is the pickle filename to which the TFA template list will be
        written.
lcinfo_pkl : str or None
        If provided, this is the path to a pickle created by a previous run of
        this function that contains the collected LC information. It will be
        loaded directly instead of re-running the LC info collection. If None,
        the cache pickle is looked for (and written) in the same directory as
        `outfile`.
target_template_frac : float
This is the fraction of total objects in lclist to use for the number of
templates.
max_target_frac_obs : float
This sets the number of templates to generate if the number of
observations for the light curves is smaller than the number of objects
in the collection. The number of templates will be set to this fraction
of the number of observations if this is the case.
min_template_number : int
This is the minimum number of templates to generate.
max_template_number : int
This is the maximum number of templates to generate. If
`target_template_frac` times the number of objects is greater than
`max_template_number`, only `max_template_number` templates will be
used.
max_rms : float
This is the maximum light curve RMS for an object to consider it as a
possible template ensemble member.
max_mult_above_magmad : float
This is the maximum multiplier above the mag-RMS fit to consider an
object as variable and thus not part of the template ensemble.
max_mult_above_mageta : float
This is the maximum multiplier above the mag-eta (variable index) fit to
consider an object as variable and thus not part of the template
ensemble.
mag_bandpass : str
This sets the key in the light curve dict's objectinfo dict to use as
the canonical magnitude for the object and apply any magnitude limits
to.
custom_bandpasses : dict or None
This can be used to provide any custom band name keys to the star
feature collection function.
mag_bright_limit : float or list of floats
This sets the brightest mag (in the `mag_bandpass` filter) for a
potential member of the TFA template ensemble. If this is a single
float, the value will be used for all magcols. If this is a list of
floats with len = len(magcols), the specific bright limits will be used
for each magcol individually.
mag_faint_limit : float or list of floats
This sets the faintest mag (in the `mag_bandpass` filter) for a
potential member of the TFA template ensemble. If this is a single
float, the value will be used for all magcols. If this is a list of
floats with len = len(magcols), the specific faint limits will be used
for each magcol individually.
process_template_lcs : bool
If True, will reform the template light curves to the chosen
time-base. If False, will only select light curves for templates but not
process them. This is useful for initial exploration of how the template
        LCs are selected.
template_sigclip : float or sequence of floats or None
This sets the sigma-clip to be applied to the template light curves.
template_interpolate : str
This sets the kwarg to pass to `scipy.interpolate.interp1d` to set the
kind of interpolation to use when reforming light curves to the TFA
template timebase.
lcformat : str
This is the `formatkey` associated with your light curve format, which
you previously passed in to the `lcproc.register_lcformat`
function. This will be used to look up how to find and read the light
curves specified in `basedir` or `use_list_of_filenames`.
lcformatdir : str or None
        If this is provided, gives the path to a directory where you've stored
your lcformat description JSONs, other than the usual directories lcproc
knows to search for them in. Use this along with `lcformat` to specify
an LC format JSON file that's not currently registered with lcproc.
timecols : list of str or None
The timecol keys to use from the lcdict in calculating the features.
magcols : list of str or None
The magcol keys to use from the lcdict in calculating the features.
errcols : list of str or None
The errcol keys to use from the lcdict in calculating the features.
nworkers : int
The number of parallel workers to launch.
maxworkertasks : int
The maximum number of tasks to run per worker before it is replaced by a
fresh one.
Returns
-------
dict
This function returns a dict that can be passed directly to
`apply_tfa_magseries` below. It can optionally produce a pickle with the
same dict, which can also be passed to that function.
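    Example
    -------
    A minimal usage sketch; the paths below are hypothetical placeholders::
        import glob
        lclist = sorted(glob.glob('/data/lcs/*-pklc.pkl'))
        templateinfo = tfa_templates_lclist(
            lclist,
            '/data/lcs/tfa-templates.pkl',
            lcformat='hat-sql',
        )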
'''
try:
formatinfo = get_lcformat(lcformat,
use_lcformat_dir=lcformatdir)
if formatinfo:
(dfileglob, readerfunc,
dtimecols, dmagcols, derrcols,
magsarefluxes, normfunc) = formatinfo
else:
LOGERROR("can't figure out the light curve format")
return None
except Exception:
LOGEXCEPTION("can't figure out the light curve format")
return None
# override the default timecols, magcols, and errcols
# using the ones provided to the function
if timecols is None:
timecols = dtimecols
if magcols is None:
magcols = dmagcols
if errcols is None:
errcols = derrcols
LOGINFO('collecting light curve information for %s objects in list...' %
len(lclist))
#
# check if we have cached results for this run
#
check_lcinfo_path = os.path.join(
os.path.dirname(outfile),
'tfa-collected-lcinfo-%s.pkl' % lcformat
)
# case where we provide a cache info pkl directly
if lcinfo_pkl and os.path.exists(lcinfo_pkl):
with open(lcinfo_pkl,'rb') as infd:
results = pickle.load(infd)
# if we don't provide an lcinfo pkl
elif not lcinfo_pkl and os.path.exists(check_lcinfo_path):
with open(check_lcinfo_path, 'rb') as infd:
results = pickle.load(infd)
# otherwise, we have to redo the LC info collection
else:
# first, we'll collect the light curve info
        tasks = [(x, lcformat, lcformatdir,
timecols, magcols, errcols,
custom_bandpasses) for x in lclist]
pool = mp.Pool(nworkers, maxtasksperchild=maxworkertasks)
results = pool.map(_collect_tfa_stats, tasks)
pool.close()
pool.join()
# save these results so we don't have to redo if something breaks here
if lcinfo_pkl:
with open(lcinfo_pkl,'wb') as outfd:
pickle.dump(results, outfd, pickle.HIGHEST_PROTOCOL)
else:
with open(check_lcinfo_path,'wb') as outfd:
                pickle.dump(results, outfd, pickle.HIGHEST_PROTOCOL)
#
# now, go through the light curve information
#
# find the center RA and center DEC -> median of all LC RAs and DECs
all_ras = np.array([res['ra'] for res in results])
all_decls = np.array([res['decl'] for res in results])
center_ra = np.nanmedian(all_ras)
center_decl = np.nanmedian(all_decls)
outdict = {
'timecols':[],
'magcols':[],
'errcols':[],
'center_ra':center_ra,
'center_decl':center_decl,
}
# for each magcol, we'll generate a separate template list
for tcol, mcol, ecol in zip(timecols, magcols, errcols):
if '.' in tcol:
tcolget = tcol.split('.')
else:
tcolget = [tcol]
# these are the containers for possible template collection LC info
(lcmag, lcmad, lceta,
lcndet, lcobj, lcfpaths,
lcra, lcdecl) = [], [], [], [], [], [], [], []
outdict['timecols'].append(tcol)
outdict['magcols'].append(mcol)
outdict['errcols'].append(ecol)
# add to the collection of all light curves
outdict[mcol] = {'collection':{'mag':[],
'mad':[],
'eta':[],
'ndet':[],
'obj':[],
'lcf':[],
'ra':[],
'decl':[]}}
LOGINFO('magcol: %s, collecting prospective template LC info...' %
mcol)
# collect the template LCs for this magcol
for result in results:
# we'll only append objects that have all of these elements
try:
thismag = result['colorfeat'][mag_bandpass]
thismad = result[mcol]['mad']
thiseta = result[mcol]['eta_normal']
thisndet = result[mcol]['ndet']
thisobj = result['objectid']
thislcf = result['lcfpath']
thisra = result['ra']
thisdecl = result['decl']
outdict[mcol]['collection']['mag'].append(thismag)
outdict[mcol]['collection']['mad'].append(thismad)
outdict[mcol]['collection']['eta'].append(thiseta)
outdict[mcol]['collection']['ndet'].append(thisndet)
outdict[mcol]['collection']['obj'].append(thisobj)
outdict[mcol]['collection']['lcf'].append(thislcf)
outdict[mcol]['collection']['ra'].append(thisra)
outdict[mcol]['collection']['decl'].append(thisdecl)
# check if we have more than one bright or faint limit elem
if isinstance(mag_bright_limit, (list, tuple)):
use_bright_maglim = mag_bright_limit[
magcols.index(mcol)
]
else:
use_bright_maglim = mag_bright_limit
if isinstance(mag_faint_limit, (list, tuple)):
use_faint_maglim = mag_faint_limit[
magcols.index(mcol)
]
else:
use_faint_maglim = mag_faint_limit
# make sure the object lies in the mag limits and RMS limits we
# set before to try to accept it into the TFA ensemble
if ((use_bright_maglim < thismag < use_faint_maglim) and
(1.4826*thismad < max_rms)):
lcmag.append(thismag)
lcmad.append(thismad)
lceta.append(thiseta)
lcndet.append(thisndet)
lcobj.append(thisobj)
lcfpaths.append(thislcf)
lcra.append(thisra)
lcdecl.append(thisdecl)
except Exception:
pass
# make sure we have enough LCs to work on
if len(lcobj) >= min_template_number:
LOGINFO('magcol: %s, %s objects eligible for '
'template selection after filtering on mag '
'limits (%s, %s) and max RMS (%s)' %
(mcol, len(lcobj),
mag_bright_limit, mag_faint_limit, max_rms))
lcmag = np.array(lcmag)
lcmad = np.array(lcmad)
lceta = np.array(lceta)
lcndet = np.array(lcndet)
lcobj = np.array(lcobj)
lcfpaths = np.array(lcfpaths)
lcra = np.array(lcra)
lcdecl = np.array(lcdecl)
sortind = np.argsort(lcmag)
lcmag = lcmag[sortind]
lcmad = lcmad[sortind]
lceta = lceta[sortind]
lcndet = lcndet[sortind]
lcobj = lcobj[sortind]
lcfpaths = lcfpaths[sortind]
lcra = lcra[sortind]
lcdecl = lcdecl[sortind]
# 1. get the mag-MAD relation
# this is needed for spline fitting
# should take care of the pesky 'x must be strictly increasing' bit
splfit_ind = np.diff(lcmag) > 0.0
splfit_ind = np.concatenate((np.array([True]), splfit_ind))
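            # worked example: lcmag = [10.0, 10.0, 10.2] gives
            # np.diff(lcmag) = [0.0, 0.2], so splfit_ind = [True, False, True]
            # keeps one point per repeated magnitude and the fit's x-array
            # stays strictly increasing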
fit_lcmag = lcmag[splfit_ind]
fit_lcmad = lcmad[splfit_ind]
fit_lceta = lceta[splfit_ind]
magmadfit = np.poly1d(np.polyfit(
fit_lcmag,
fit_lcmad,
2
))
magmadind = lcmad/magmadfit(lcmag) < max_mult_above_magmad
# 2. get the mag-eta relation
magetafit = np.poly1d(np.polyfit(
fit_lcmag,
fit_lceta,
2
))
magetaind = magetafit(lcmag)/lceta < max_mult_above_mageta
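            # note the ratio is inverted relative to the MAD cut above: a
            # smaller eta generally indicates a more correlated (likely
            # variable) light curve, so objects with fit/eta above the
            # multiplier are the ones dropped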
# 3. get the median ndet
median_ndet = np.median(lcndet)
ndetind = lcndet >= median_ndet
# form the final template ensemble
templateind = magmadind & magetaind & ndetind
# check again if we have enough LCs in the template
if templateind.sum() >= min_template_number:
LOGINFO('magcol: %s, %s objects selectable for TFA templates' %
(mcol, templateind.sum()))
templatemag = lcmag[templateind]
templatemad = lcmad[templateind]
templateeta = lceta[templateind]
templatendet = lcndet[templateind]
templateobj = lcobj[templateind]
templatelcf = lcfpaths[templateind]
templatera = lcra[templateind]
templatedecl = lcdecl[templateind]
# now, check if we have no more than the required fraction of
# TFA templates
target_number_templates = int(target_template_frac*len(results))
if target_number_templates > max_template_number:
target_number_templates = max_template_number
LOGINFO('magcol: %s, selecting %s TFA templates randomly' %
(mcol, target_number_templates))
# get the xi-eta
template_cxi, template_ceta = coordutils.xieta_from_radecl(
templatera,
templatedecl,
center_ra,
center_decl
)
# select random uniform objects from the template candidates
targetind = npr.choice(templateobj.size,
target_number_templates,
replace=False)
templatemag = templatemag[targetind]
templatemad = templatemad[targetind]
templateeta = templateeta[targetind]
templatendet = templatendet[targetind]
templateobj = templateobj[targetind]
templatelcf = templatelcf[targetind]
templatera = templatera[targetind]
templatedecl = templatedecl[targetind]
template_cxi = template_cxi[targetind]
template_ceta = template_ceta[targetind]
# get the max ndet so far to use that LC as the timebase
maxndetind = templatendet == templatendet.max()
timebaselcf = templatelcf[maxndetind][0]
timebasendet = templatendet[maxndetind][0]
LOGINFO('magcol: %s, selected %s as template time '
'base LC with %s observations' %
(mcol, timebaselcf, timebasendet))
if process_template_lcs:
timebaselcdict = readerfunc(timebaselcf)
if ( (isinstance(timebaselcdict, (list, tuple))) and
(isinstance(timebaselcdict[0], dict)) ):
timebaselcdict = timebaselcdict[0]
# this is the timebase to use for all of the templates
timebase = _dict_get(timebaselcdict, tcolget)
else:
timebase = None
# also check if the number of templates is longer than the
# actual timebase of the observations. this will cause issues
# with overcorrections and will probably break TFA
if target_number_templates > timebasendet:
LOGWARNING('The number of TFA templates (%s) is '
'larger than the number of observations '
'of the time base (%s). This will likely '
'overcorrect all light curves to a '
'constant level. '
'Will use up to %s x timebase ndet '
'templates instead' %
(target_number_templates,
timebasendet,
max_target_frac_obs))
# regen the templates based on the new number
newmaxtemplates = int(max_target_frac_obs*timebasendet)
# choose this number out of the already chosen templates
# randomly
LOGWARNING('magcol: %s, re-selecting %s TFA '
'templates randomly' %
(mcol, newmaxtemplates))
# select random uniform objects from the template candidates
targetind = npr.choice(templateobj.size,
newmaxtemplates,
replace=False)
templatemag = templatemag[targetind]
templatemad = templatemad[targetind]
templateeta = templateeta[targetind]
templatendet = templatendet[targetind]
templateobj = templateobj[targetind]
templatelcf = templatelcf[targetind]
templatera = templatera[targetind]
templatedecl = templatedecl[targetind]
template_cxi = template_cxi[targetind]
template_ceta = template_ceta[targetind]
plt.plot(template_cxi, template_ceta,
marker='o', linestyle='none', ms=1.0)
plt.xlabel('image plane-projected coordinate xi')
plt.ylabel('image plane-projected coordinate eta')
plt.title(
'image plane-projected coords - selected template objs'
)
plt.savefig(
os.path.join(os.path.dirname(outfile),
'template-cxi-ceta-%s.png' % mcol),
bbox_inches='tight'
)
plt.close('all')
# get the max ndet so far to use that LC as the timebase
maxndetind = templatendet == templatendet.max()
timebaselcf = templatelcf[maxndetind][0]
timebasendet = templatendet[maxndetind][0]
LOGWARNING('magcol: %s, re-selected %s as template time '
'base LC with %s observations' %
(mcol, timebaselcf, timebasendet))
if process_template_lcs:
timebaselcdict = readerfunc(timebaselcf)
if ( (isinstance(timebaselcdict, (list, tuple))) and
(isinstance(timebaselcdict[0], dict)) ):
timebaselcdict = timebaselcdict[0]
# this is the timebase to use for all of the templates
timebase = _dict_get(timebaselcdict, tcolget)
else:
timebase = None
#
# end of check for ntemplates > timebase ndet
#
if process_template_lcs:
LOGINFO('magcol: %s, reforming TFA template LCs to '
                        'the chosen timebase...' % mcol)
# reform all template LCs to this time base, normalize to
# zero, and sigclip as requested. this is a parallel op
# first, we'll collect the light curve info
tasks = [(x, lcformat, lcformatdir,
tcol, mcol, ecol,
timebase, template_interpolate,
template_sigclip) for x
in templatelcf]
pool = mp.Pool(nworkers, maxtasksperchild=maxworkertasks)
reform_results = pool.map(_reform_templatelc_for_tfa, tasks)
pool.close()
pool.join()
# generate a 2D array for the template magseries with
# dimensions = (n_objects, n_lcpoints)
template_magseries = np.array([x['mags']
for x in reform_results])
template_errseries = np.array([x['errs']
for x in reform_results])
else:
template_magseries = None
template_errseries = None
# put everything into a templateinfo dict for this magcol
outdict[mcol].update({
'timebaselcf':timebaselcf,
'timebase':timebase,
'trendfits':{'mag-mad':magmadfit,
'mag-eta':magetafit},
'template_objects':templateobj,
'template_ra':templatera,
'template_decl':templatedecl,
'template_cxi':template_cxi,
'template_ceta':template_ceta,
'template_mag':templatemag,
'template_mad':templatemad,
'template_eta':templateeta,
'template_ndet':templatendet,
'template_magseries':template_magseries,
'template_errseries':template_errseries
})
# make a KDTree on the template coordinates
outdict[mcol]['template_radecl_kdtree'] = (
coordutils.make_kdtree(
templatera, templatedecl
)
)
# if we don't have enough, return nothing for this magcol
else:
LOGERROR('not enough objects meeting requested '
'MAD, eta, ndet conditions to '
'select templates for magcol: %s' % mcol)
continue
else:
LOGERROR('nobjects: %s, not enough in requested mag range to '
'select templates for magcol: %s' % (len(lcobj),mcol))
continue
# make the plots for mag-MAD/mag-eta relation and fits used
plt.plot(lcmag, lcmad, marker='o', linestyle='none', ms=1.0)
modelmags = np.linspace(lcmag.min(), lcmag.max(), num=1000)
plt.plot(modelmags, outdict[mcol]['trendfits']['mag-mad'](modelmags))
plt.yscale('log')
plt.xlabel('catalog magnitude')
plt.ylabel('light curve MAD')
plt.title('catalog mag vs. light curve MAD and fit')
plt.savefig(
os.path.join(os.path.dirname(outfile),
'catmag-%s-lcmad-fit.png' % mcol),
bbox_inches='tight'
)
plt.close('all')
plt.plot(lcmag, lceta, marker='o', linestyle='none', ms=1.0)
modelmags = np.linspace(lcmag.min(), lcmag.max(), num=1000)
plt.plot(modelmags, outdict[mcol]['trendfits']['mag-eta'](modelmags))
plt.yscale('log')
plt.xlabel('catalog magnitude')
plt.ylabel('light curve eta variable index')
plt.title('catalog mag vs. light curve eta and fit')
plt.savefig(
os.path.join(os.path.dirname(outfile),
'catmag-%s-lceta-fit.png' % mcol),
bbox_inches='tight'
)
plt.close('all')
#
# end of operating on each magcol
#
if outfile.endswith('.gz'):
outfd = gzip.open(outfile,'wb')
else:
outfd = open(outfile,'wb')
with outfd:
pickle.dump(outdict, outfd, protocol=pickle.HIGHEST_PROTOCOL)
# return the templateinfo dict
return outdict
def apply_tfa_magseries(lcfile,
timecol,
magcol,
errcol,
templateinfo,
mintemplatedist_arcmin=10.0,
lcformat='hat-sql',
lcformatdir=None,
interp='nearest',
sigclip=5.0):
'''This applies the TFA correction to an LC given TFA template information.
Parameters
----------
lcfile : str
This is the light curve file to apply the TFA correction to.
timecol,magcol,errcol : str
These are the column keys in the lcdict for the LC file to apply the TFA
correction to.
templateinfo : dict or str
This is either the dict produced by `tfa_templates_lclist` or the pickle
produced by the same function.
mintemplatedist_arcmin : float
This sets the minimum distance required from the target object for
objects in the TFA template ensemble. Objects closer than this distance
will be removed from the ensemble.
lcformat : str
This is the `formatkey` associated with your light curve format, which
you previously passed in to the `lcproc.register_lcformat`
function. This will be used to look up how to find and read the light
curves specified in `basedir` or `use_list_of_filenames`.
lcformatdir : str or None
        If this is provided, gives the path to a directory where you've stored
your lcformat description JSONs, other than the usual directories lcproc
knows to search for them in. Use this along with `lcformat` to specify
an LC format JSON file that's not currently registered with lcproc.
interp : str
This is passed to scipy.interpolate.interp1d as the kind of
interpolation to use when reforming this light curve to the timebase of
the TFA templates.
sigclip : float or sequence of two floats or None
This is the sigma clip to apply to this light curve before running TFA
on it.
Returns
-------
str
This returns the filename of the light curve file generated after TFA
applications. This is a pickle (that can be read by `lcproc.read_pklc`)
in the same directory as `lcfile`. The `magcol` will be encoded in the
filename, so each `magcol` in `lcfile` gets its own output file.
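    Example
    -------
    A minimal usage sketch; the LC path and the time/mag/err column keys
    below are hypothetical placeholders::
        tfa_lc_pkl = apply_tfa_magseries(
            '/data/lcs/HAT-123-0000001-pklc.pkl',
            'rjd', 'aep_000', 'aie_000',
            '/data/lcs/tfa-templates.pkl',
            lcformat='hat-sql',
        )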
'''
try:
formatinfo = get_lcformat(lcformat,
use_lcformat_dir=lcformatdir)
if formatinfo:
(dfileglob, readerfunc,
dtimecols, dmagcols, derrcols,
magsarefluxes, normfunc) = formatinfo
else:
LOGERROR("can't figure out the light curve format")
return None
except Exception:
LOGEXCEPTION("can't figure out the light curve format")
return None
# get the templateinfo from a pickle if necessary
if isinstance(templateinfo,str) and os.path.exists(templateinfo):
with open(templateinfo,'rb') as infd:
templateinfo = pickle.load(infd)
lcdict = readerfunc(lcfile)
if ((isinstance(lcdict, (tuple, list))) and
isinstance(lcdict[0], dict)):
lcdict = lcdict[0]
objectid = lcdict['objectid']
# this is the initial template array
tmagseries = templateinfo[magcol][
'template_magseries'
][::]
# if the object itself is in the template ensemble, remove it
if objectid in templateinfo[magcol]['template_objects']:
LOGWARNING('object %s found in the TFA template ensemble, removing...' %
objectid)
templateind = templateinfo[magcol]['template_objects'] == objectid
# get the objects in the tmagseries not corresponding to the current
# object's index
tmagseries = tmagseries[~templateind,:]
# check if there are close matches to the current object in the templates
object_matches = coordutils.conesearch_kdtree(
templateinfo[magcol]['template_radecl_kdtree'],
lcdict['objectinfo']['ra'], lcdict['objectinfo']['decl'],
mintemplatedist_arcmin/60.0
)
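    # (the conesearch radius argument is in degrees, hence the
    # arcmin -> deg conversion above)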
if len(object_matches) > 0:
LOGWARNING(
"object %s is within %.1f arcminutes of %s "
"template objects. Will remove these objects "
"from the template applied to this object." %
(objectid, mintemplatedist_arcmin, len(object_matches))
)
removalind = np.full(
templateinfo[magcol]['template_objects'].size,
            False, dtype=bool  # np.bool is removed in newer NumPy
)
removalind[np.array(object_matches)] = True
tmagseries = tmagseries[~removalind,:]
#
# finally, proceed to TFA
#
# this is the normal matrix
normal_matrix = np.dot(tmagseries, tmagseries.T)
    # get the pseudo-inverse of the normal matrix (scipy.linalg.pinv2 was
    # removed in SciPy 1.9; pinv is likewise SVD-based)
    normal_matrix_inverse = spla.pinv(normal_matrix)
# get the timebase from the template
timebase = templateinfo[magcol]['timebase']
# use this to reform the target lc in the same manner as that for a TFA
# template LC
reformed_targetlc = _reform_templatelc_for_tfa((
lcfile,
lcformat,
lcformatdir,
timecol,
magcol,
errcol,
timebase,
interp,
sigclip
))
# calculate the scalar products of the target and template magseries
scalar_products = np.dot(tmagseries, reformed_targetlc['mags'])
# calculate the corrections
corrections = np.dot(normal_matrix_inverse, scalar_products)
# finally, get the corrected time series for the target object
corrected_magseries = (
reformed_targetlc['origmags'] -
np.dot(tmagseries.T, corrections)
)
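    # in matrix terms: with the template matrix T (ntemplates x npoints) and
    # the target vector m, the correction coefficients c solve the normal
    # equations (T T^T) c = T m, so the corrected series is m - T^T c: the
    # residual after a linear least-squares fit of m onto the templates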
outdict = {
'times':timebase,
'mags':corrected_magseries,
'errs':reformed_targetlc['errs'],
'mags_median':np.median(corrected_magseries),
'mags_mad': np.median(np.abs(corrected_magseries -
np.median(corrected_magseries))),
'work':{'tmagseries':tmagseries,
'normal_matrix':normal_matrix,
'normal_matrix_inverse':normal_matrix_inverse,
'scalar_products':scalar_products,
'corrections':corrections,
'reformed_targetlc':reformed_targetlc},
}
# we'll write back the tfa times and mags to the lcdict
lcdict['tfa'] = outdict
outfile = os.path.join(
os.path.dirname(lcfile),
'%s-tfa-%s-pklc.pkl' % (
squeeze(objectid).replace(' ','-'),
magcol
)
)
with open(outfile,'wb') as outfd:
pickle.dump(lcdict, outfd, pickle.HIGHEST_PROTOCOL)
return outfile
def _parallel_tfa_worker(task):
'''
This is a parallel worker for the function below.
task[0] = lcfile
task[1] = timecol
task[2] = magcol
task[3] = errcol
task[4] = templateinfo
task[5] = lcformat
    task[6] = lcformatdir
    task[7] = interp
    task[8] = sigclip
    task[9] = mintemplatedist_arcmin
'''
(lcfile, timecol, magcol, errcol,
templateinfo, lcformat, lcformatdir,
interp, sigclip, mintemplatedist_arcmin) = task
try:
res = apply_tfa_magseries(
lcfile, timecol, magcol, errcol,
templateinfo,
lcformat=lcformat,
lcformatdir=lcformatdir,
interp=interp,
sigclip=sigclip,
mintemplatedist_arcmin=mintemplatedist_arcmin
)
if res:
LOGINFO('%s -> %s TFA OK' % (lcfile, res))
return res
except Exception:
LOGEXCEPTION('TFA failed for %s' % lcfile)
return None
def parallel_tfa_lclist(lclist,
templateinfo,
timecols=None,
magcols=None,
errcols=None,
lcformat='hat-sql',
lcformatdir=None,
interp='nearest',
sigclip=5.0,
mintemplatedist_arcmin=10.0,
nworkers=NCPUS,
maxworkertasks=1000):
'''This applies TFA in parallel to all LCs in the given list of file names.
Parameters
----------
    lclist : list of str
This is a list of light curve files to apply TFA correction to.
templateinfo : dict or str
This is either the dict produced by `tfa_templates_lclist` or the pickle
produced by the same function.
timecols : list of str or None
The timecol keys to use from the lcdict in applying TFA corrections.
magcols : list of str or None
The magcol keys to use from the lcdict in applying TFA corrections.
errcols : list of str or None
The errcol keys to use from the lcdict in applying TFA corrections.
lcformat : str
This is the `formatkey` associated with your light curve format, which
you previously passed in to the `lcproc.register_lcformat`
function. This will be used to look up how to find and read the light
curves specified in `basedir` or `use_list_of_filenames`.
lcformatdir : str or None
        If this is provided, gives the path to a directory where you've stored
your lcformat description JSONs, other than the usual directories lcproc
knows to search for them in. Use this along with `lcformat` to specify
an LC format JSON file that's not currently registered with lcproc.
interp : str
This is passed to scipy.interpolate.interp1d as the kind of
interpolation to use when reforming the light curves to the timebase of
the TFA templates.
sigclip : float or sequence of two floats or None
This is the sigma clip to apply to the light curves before running TFA
        on them.
mintemplatedist_arcmin : float
This sets the minimum distance required from the target object for
objects in the TFA template ensemble. Objects closer than this distance
will be removed from the ensemble.
nworkers : int
        The number of parallel workers to launch.
maxworkertasks : int
The maximum number of tasks per worker allowed before it's replaced by a
fresh one.
Returns
-------
dict
Contains the input file names and output TFA light curve filenames per
input file organized by each `magcol` in `magcols`.
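    Example
    -------
    A minimal usage sketch; the paths below are hypothetical placeholders::
        import glob
        results = parallel_tfa_lclist(
            sorted(glob.glob('/data/lcs/*-pklc.pkl')),
            '/data/lcs/tfa-templates.pkl',
            lcformat='hat-sql',
            nworkers=4,
        )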
'''
# open the templateinfo first
if isinstance(templateinfo,str) and os.path.exists(templateinfo):
with open(templateinfo,'rb') as infd:
templateinfo = pickle.load(infd)
try:
formatinfo = get_lcformat(lcformat,
use_lcformat_dir=lcformatdir)
if formatinfo:
(dfileglob, readerfunc,
dtimecols, dmagcols, derrcols,
magsarefluxes, normfunc) = formatinfo
else:
LOGERROR("can't figure out the light curve format")
return None
except Exception:
LOGEXCEPTION("can't figure out the light curve format")
return None
# override the default timecols, magcols, and errcols
# using the ones provided to the function
# we'll get the defaults from the templateinfo object
if timecols is None:
timecols = templateinfo['timecols']
if magcols is None:
magcols = templateinfo['magcols']
if errcols is None:
errcols = templateinfo['errcols']
outdict = {}
# run by magcol
for t, m, e in zip(timecols, magcols, errcols):
tasks = [(x, t, m, e, templateinfo,
lcformat, lcformatdir,
interp, sigclip, mintemplatedist_arcmin) for
x in lclist]
pool = mp.Pool(nworkers, maxtasksperchild=maxworkertasks)
results = pool.map(_parallel_tfa_worker, tasks)
pool.close()
pool.join()
outdict[m] = results
return outdict
def parallel_tfa_lcdir(lcdir,
templateinfo,
lcfileglob=None,
timecols=None,
magcols=None,
errcols=None,
lcformat='hat-sql',
lcformatdir=None,
interp='nearest',
sigclip=5.0,
mintemplatedist_arcmin=10.0,
nworkers=NCPUS,
maxworkertasks=1000):
'''This applies TFA in parallel to all LCs in a directory.
Parameters
----------
lcdir : str
        This is the directory containing the light curve files to process.
templateinfo : dict or str
This is either the dict produced by `tfa_templates_lclist` or the pickle
produced by the same function.
lcfileglob : str or None
The UNIX file glob to use when searching for light curve files in
        `lcdir`. If None, the default file glob associated with the registered
        LC format is used.
timecols : list of str or None
The timecol keys to use from the lcdict in applying TFA corrections.
magcols : list of str or None
The magcol keys to use from the lcdict in applying TFA corrections.
errcols : list of str or None
The errcol keys to use from the lcdict in applying TFA corrections.
lcformat : str
This is the `formatkey` associated with your light curve format, which
you previously passed in to the `lcproc.register_lcformat`
function. This will be used to look up how to find and read the light
curves specified in `basedir` or `use_list_of_filenames`.
lcformatdir : str or None
        If this is provided, gives the path to a directory where you've stored
your lcformat description JSONs, other than the usual directories lcproc
knows to search for them in. Use this along with `lcformat` to specify
an LC format JSON file that's not currently registered with lcproc.
interp : str
This is passed to scipy.interpolate.interp1d as the kind of
interpolation to use when reforming the light curves to the timebase of
the TFA templates.
sigclip : float or sequence of two floats or None
This is the sigma clip to apply to the light curves before running TFA
        on them.
mintemplatedist_arcmin : float
This sets the minimum distance required from the target object for
objects in the TFA template ensemble. Objects closer than this distance
will be removed from the ensemble.
nworkers : int
        The number of parallel workers to launch.
maxworkertasks : int
The maximum number of tasks per worker allowed before it's replaced by a
fresh one.
Returns
-------
dict
Contains the input file names and output TFA light curve filenames per
input file organized by each `magcol` in `magcols`.
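    Example
    -------
    A minimal usage sketch; the directory and glob below are hypothetical
    placeholders::
        results = parallel_tfa_lcdir(
            '/data/lcs',
            '/data/lcs/tfa-templates.pkl',
            lcfileglob='*-pklc.pkl',
            lcformat='hat-sql',
        )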
'''
# open the templateinfo first
if isinstance(templateinfo,str) and os.path.exists(templateinfo):
with open(templateinfo,'rb') as infd:
templateinfo = pickle.load(infd)
try:
formatinfo = get_lcformat(lcformat,
use_lcformat_dir=lcformatdir)
if formatinfo:
(dfileglob, readerfunc,
dtimecols, dmagcols, derrcols,
magsarefluxes, normfunc) = formatinfo
else:
LOGERROR("can't figure out the light curve format")
return None
except Exception:
LOGEXCEPTION("can't figure out the light curve format")
return None
# find all the files matching the lcglob in lcdir
if lcfileglob is None:
lcfileglob = dfileglob
lclist = sorted(glob.glob(os.path.join(lcdir, lcfileglob)))
return parallel_tfa_lclist(
lclist,
templateinfo,
timecols=timecols,
magcols=magcols,
errcols=errcols,
lcformat=lcformat,
        lcformatdir=lcformatdir,
interp=interp,
sigclip=sigclip,
mintemplatedist_arcmin=mintemplatedist_arcmin,
nworkers=nworkers,
maxworkertasks=maxworkertasks
)
|
{
"content_hash": "eef1f139615b1c007ea3527f6ea61e6d",
"timestamp": "",
"source": "github",
"line_count": 1530,
"max_line_length": 80,
"avg_line_length": 35.020261437908495,
"alnum_prop": 0.562568821037308,
"repo_name": "lgbouma/astrobase",
"id": "3ab5410b8bc5034e3887b7ac25fb4e6663229382",
"size": "53694",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "astrobase/lcproc/tfa.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "3584"
},
{
"name": "Dockerfile",
"bytes": "891"
},
{
"name": "HTML",
"bytes": "61470"
},
{
"name": "JavaScript",
"bytes": "171219"
},
{
"name": "Python",
"bytes": "2748532"
}
],
"symlink_target": ""
}
|
'''Test case for the signal dispatcher
'''
import unittest
from lighty.signals import SignalDispatcher
class SignalsTestCase(unittest.TestCase):
'''Test case for signals implementation
'''
def setUp(self):
self.dispatcher = SignalDispatcher()
self.objects = None
self.filtered = None
def handler(objects):
self.objects = objects
def filtered(objects):
self.filtered = objects
self._filtered = filtered
self.dispatcher.channel('/test/', handler)
self.dispatcher.channel('/args/', handler)
self.dispatcher.channel('/args/', filtered, filters=[lambda x: x > 0])
def testEmptySignal(self):
'''Test handler applied for signal with no objects passed
'''
self.dispatcher.signal('/test/')
assert self.objects == [], (
            'Error dispatching signal with no objects: %s' % self.objects)
def testObjectsSignal(self):
'''Test handler with objects passed into signals
'''
self.dispatcher.signal('/args/', [0, 1, 2, 3])
assert self.objects == [0, 1, 2, 3], (
            'Error dispatching signal with objects: %s' % self.objects)
        assert self.filtered == [1, 2, 3], ('Error dispatching signal with '
                                            'objects filtered: %s' % self.filtered)
def testRemoveHandler(self):
'''Test handler removing
'''
self.dispatcher.close('/args/', self._filtered)
self.dispatcher.signal('/args/', [0, 1, 2, 3])
assert self.objects == [0, 1, 2, 3], (
'Error dispatching signal with objects: %s' % self.objects)
        assert self.filtered is None, ('Error removing handler: %s' %
                                       self.filtered)
def testCloseChannel(self):
'''Test closing channels
'''
self.dispatcher.close('/args/')
self.dispatcher.signal('/args/', [0, 1, 2, 3])
        assert self.objects is None, 'Error closing channel: %s' % self.objects
        assert self.filtered is None, ('Error closing channel: %s' %
                                       self.filtered)
def test():
suite = unittest.TestSuite()
suite.addTest(SignalsTestCase('testEmptySignal'))
suite.addTest(SignalsTestCase('testObjectsSignal'))
suite.addTest(SignalsTestCase('testCloseChannel'))
suite.addTest(SignalsTestCase('testRemoveHandler'))
return suite
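if __name__ == '__main__':
    # convenience entry point so the suite can be run directly, e.g.
    # `python tests/signals.py`; a sketch, assuming no external test runner
    unittest.TextTestRunner().run(test())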
|
{
"content_hash": "0dc660603a91fdc5f7b96c53ac008290",
"timestamp": "",
"source": "github",
"line_count": 70,
"max_line_length": 79,
"avg_line_length": 34.98571428571429,
"alnum_prop": 0.5933033891384238,
"repo_name": "GrAndSE/lighty",
"id": "bdceb771bf842e609111c34b531ff18757b415d1",
"size": "2449",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/signals.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "207994"
}
],
"symlink_target": ""
}
|
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
# Flag to indicate if this migration is too risky
# to run online and needs to be coordinated for offline
is_dangerous = True
def forwards(self, orm):
# Deleting field 'Project.team'
db.delete_column(u'sentry_project', 'team_id')
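        # on PostgreSQL this is roughly equivalent to:
        #   ALTER TABLE sentry_project DROP COLUMN team_id;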
def backwards(self, orm):
# Adding field 'Project.team'
db.add_column(u'sentry_project', 'team',
self.gf('sentry.db.models.fields.foreignkey.FlexibleForeignKey')(
to=orm['sentry.Team'], null=True, on_delete=models.SET_NULL),
keep_default=False)
models = {
'sentry.activity': {
'Meta': {'object_name': 'Activity'},
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {'null': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'type': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'null': 'True'})
},
'sentry.apiapplication': {
'Meta': {'object_name': 'ApiApplication'},
'allowed_origins': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'client_id': ('django.db.models.fields.CharField', [], {'default': "'86a08ef0c2974f20b02fc10061bb3a43a34d4518b0c74696880528b727153b69'", 'unique': 'True', 'max_length': '64'}),
'client_secret': ('sentry.db.models.fields.encrypted.EncryptedTextField', [], {'default': "'118709d7cb3941a0b2eb3f14869ae33c9518024828104319815ecbc2f3585049'"}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'homepage_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'default': "'Splendid Hyena'", 'max_length': '64', 'blank': 'True'}),
'owner': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"}),
'privacy_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True'}),
'redirect_uris': ('django.db.models.fields.TextField', [], {}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'terms_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True'})
},
'sentry.apiauthorization': {
'Meta': {'unique_together': "(('user', 'application'),)", 'object_name': 'ApiAuthorization'},
'application': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.ApiApplication']", 'null': 'True'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'scope_list': ('sentry.db.models.fields.array.ArrayField', [], {'of': ('django.db.models.fields.TextField', [], {})}),
'scopes': ('django.db.models.fields.BigIntegerField', [], {'default': 'None'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.apigrant': {
'Meta': {'object_name': 'ApiGrant'},
'application': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.ApiApplication']"}),
'code': ('django.db.models.fields.CharField', [], {'default': "'58f3ae56ab7e4c4388404b2434d13718'", 'max_length': '64', 'db_index': 'True'}),
'expires_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2018, 3, 2, 0, 0)', 'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'redirect_uri': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'scope_list': ('sentry.db.models.fields.array.ArrayField', [], {'of': ('django.db.models.fields.TextField', [], {})}),
'scopes': ('django.db.models.fields.BigIntegerField', [], {'default': 'None'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.apikey': {
'Meta': {'object_name': 'ApiKey'},
'allowed_origins': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32'}),
'label': ('django.db.models.fields.CharField', [], {'default': "'Default'", 'max_length': '64', 'blank': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'key_set'", 'to': "orm['sentry.Organization']"}),
'scope_list': ('sentry.db.models.fields.array.ArrayField', [], {'of': ('django.db.models.fields.TextField', [], {})}),
'scopes': ('django.db.models.fields.BigIntegerField', [], {'default': 'None'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'})
},
'sentry.apitoken': {
'Meta': {'object_name': 'ApiToken'},
'application': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.ApiApplication']", 'null': 'True'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'expires_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2018, 4, 1, 0, 0)', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'refresh_token': ('django.db.models.fields.CharField', [], {'default': "'5d48d522c6e24d4eb4c39f38b039ff5f76c69268e2db465d91936978895bf4fb'", 'max_length': '64', 'unique': 'True', 'null': 'True'}),
'scope_list': ('sentry.db.models.fields.array.ArrayField', [], {'of': ('django.db.models.fields.TextField', [], {})}),
'scopes': ('django.db.models.fields.BigIntegerField', [], {'default': 'None'}),
'token': ('django.db.models.fields.CharField', [], {'default': "'e9d058c767484319b3c3e4d663cf6965ef396fd45fe04d18a55e1041703282bc'", 'unique': 'True', 'max_length': '64'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.assistantactivity': {
'Meta': {'unique_together': "(('user', 'guide_id'),)", 'object_name': 'AssistantActivity', 'db_table': "'sentry_assistant_activity'"},
'dismissed_ts': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'guide_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'useful': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"}),
'viewed_ts': ('django.db.models.fields.DateTimeField', [], {'null': 'True'})
},
'sentry.auditlogentry': {
'Meta': {'object_name': 'AuditLogEntry'},
'actor': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'blank': 'True', 'related_name': "'audit_actors'", 'null': 'True', 'to': "orm['sentry.User']"}),
'actor_key': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.ApiKey']", 'null': 'True', 'blank': 'True'}),
'actor_label': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'event': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ip_address': ('django.db.models.fields.GenericIPAddressField', [], {'max_length': '39', 'null': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'target_object': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'target_user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'blank': 'True', 'related_name': "'audit_targets'", 'null': 'True', 'to': "orm['sentry.User']"})
},
'sentry.authenticator': {
'Meta': {'unique_together': "(('user', 'type'),)", 'object_name': 'Authenticator', 'db_table': "'auth_authenticator'"},
'config': ('sentry.db.models.fields.encrypted.EncryptedPickledObjectField', [], {}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedAutoField', [], {'primary_key': 'True'}),
'last_used_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'type': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.authidentity': {
'Meta': {'unique_together': "(('auth_provider', 'ident'), ('auth_provider', 'user'))", 'object_name': 'AuthIdentity'},
'auth_provider': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.AuthProvider']"}),
'data': ('sentry.db.models.fields.encrypted.EncryptedJsonField', [], {'default': '{}'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'last_synced': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_verified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.authprovider': {
'Meta': {'object_name': 'AuthProvider'},
'config': ('sentry.db.models.fields.encrypted.EncryptedJsonField', [], {'default': '{}'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'default_global_access': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'default_role': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '50'}),
'default_teams': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sentry.Team']", 'symmetrical': 'False', 'blank': 'True'}),
'flags': ('django.db.models.fields.BigIntegerField', [], {'default': '0'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_sync': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']", 'unique': 'True'}),
'provider': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'sync_time': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'})
},
'sentry.broadcast': {
'Meta': {'object_name': 'Broadcast'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_expires': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2018, 3, 9, 0, 0)', 'null': 'True', 'blank': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'link': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'message': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'upstream_id': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'blank': 'True'})
},
'sentry.broadcastseen': {
'Meta': {'unique_together': "(('broadcast', 'user'),)", 'object_name': 'BroadcastSeen'},
'broadcast': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Broadcast']"}),
'date_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.commit': {
'Meta': {'unique_together': "(('repository_id', 'key'),)", 'object_name': 'Commit', 'index_together': "(('repository_id', 'date_added'),)"},
'author': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.CommitAuthor']", 'null': 'True'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'message': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'organization_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'repository_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {})
},
'sentry.commitauthor': {
'Meta': {'unique_together': "(('organization_id', 'email'), ('organization_id', 'external_id'))", 'object_name': 'CommitAuthor'},
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'external_id': ('django.db.models.fields.CharField', [], {'max_length': '164', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'}),
'organization_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'})
},
'sentry.commitfilechange': {
'Meta': {'unique_together': "(('commit', 'filename'),)", 'object_name': 'CommitFileChange'},
'commit': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Commit']"}),
'filename': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'organization_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '1'})
},
'sentry.counter': {
'Meta': {'object_name': 'Counter', 'db_table': "'sentry_projectcounter'"},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'unique': 'True'}),
'value': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {})
},
'sentry.deletedorganization': {
'Meta': {'object_name': 'DeletedOrganization'},
'actor_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True'}),
'actor_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}),
'actor_label': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'date_deleted': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ip_address': ('django.db.models.fields.GenericIPAddressField', [], {'max_length': '39', 'null': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'reason': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True'})
},
'sentry.deletedproject': {
'Meta': {'object_name': 'DeletedProject'},
'actor_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True'}),
'actor_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}),
'actor_label': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'date_deleted': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ip_address': ('django.db.models.fields.GenericIPAddressField', [], {'max_length': '39', 'null': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True'}),
'organization_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True'}),
'organization_name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'organization_slug': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True'}),
'platform': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'reason': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True'})
},
'sentry.deletedteam': {
'Meta': {'object_name': 'DeletedTeam'},
'actor_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True'}),
'actor_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}),
'actor_label': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'date_deleted': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ip_address': ('django.db.models.fields.GenericIPAddressField', [], {'max_length': '39', 'null': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'organization_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True'}),
'organization_name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'organization_slug': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True'}),
'reason': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True'})
},
'sentry.deploy': {
'Meta': {'object_name': 'Deploy'},
'date_finished': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_started': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'environment_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'notified': ('django.db.models.fields.NullBooleanField', [], {'default': 'False', 'null': 'True', 'db_index': 'True', 'blank': 'True'}),
'organization_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'release': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Release']"}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'})
},
'sentry.distribution': {
'Meta': {'unique_together': "(('release', 'name'),)", 'object_name': 'Distribution'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'organization_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'release': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Release']"})
},
'sentry.dsymapp': {
'Meta': {'unique_together': "(('project', 'platform', 'app_id'),)", 'object_name': 'DSymApp'},
'app_id': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'data': ('sentry.db.models.fields.jsonfield.JSONField', [], {'default': '{}'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_synced': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'platform': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'sync_id': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'})
},
'sentry.email': {
'Meta': {'object_name': 'Email'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('sentry.db.models.fields.citext.CIEmailField', [], {'unique': 'True', 'max_length': '75'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'})
},
'sentry.environment': {
'Meta': {'unique_together': "(('project_id', 'name'), ('organization_id', 'name'))", 'object_name': 'Environment'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'organization_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'project_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'projects': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sentry.Project']", 'through': "orm['sentry.EnvironmentProject']", 'symmetrical': 'False'})
},
'sentry.environmentproject': {
'Meta': {'unique_together': "(('project', 'environment'),)", 'object_name': 'EnvironmentProject'},
'environment': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Environment']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_hidden': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"})
},
'sentry.event': {
'Meta': {'unique_together': "(('project_id', 'event_id'),)", 'object_name': 'Event', 'db_table': "'sentry_message'", 'index_together': "(('group_id', 'datetime'),)"},
'data': ('sentry.db.models.fields.node.NodeField', [], {'null': 'True', 'blank': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'db_column': "'message_id'"}),
'group_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True', 'blank': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'platform': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True', 'blank': 'True'}),
'time_spent': ('sentry.db.models.fields.bounded.BoundedIntegerField', [], {'null': 'True'})
},
'sentry.eventmapping': {
'Meta': {'unique_together': "(('project_id', 'event_id'),)", 'object_name': 'EventMapping'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'group_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {})
},
'sentry.eventprocessingissue': {
'Meta': {'unique_together': "(('raw_event', 'processing_issue'),)", 'object_name': 'EventProcessingIssue'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'processing_issue': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.ProcessingIssue']"}),
'raw_event': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.RawEvent']"})
},
'sentry.eventtag': {
'Meta': {'unique_together': "(('event_id', 'key_id', 'value_id'),)", 'object_name': 'EventTag', 'index_together': "(('group_id', 'key_id', 'value_id'),)"},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'event_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {}),
'group_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {}),
'project_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {}),
'value_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {})
},
'sentry.eventuser': {
'Meta': {'unique_together': "(('project_id', 'ident'), ('project_id', 'hash'))", 'object_name': 'EventUser', 'index_together': "(('project_id', 'email'), ('project_id', 'username'), ('project_id', 'ip_address'))"},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True'}),
'hash': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'}),
'ip_address': ('django.db.models.fields.GenericIPAddressField', [], {'max_length': '39', 'null': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'})
},
'sentry.featureadoption': {
'Meta': {'unique_together': "(('organization', 'feature_id'),)", 'object_name': 'FeatureAdoption'},
'applicable': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'complete': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'data': ('sentry.db.models.fields.jsonfield.JSONField', [], {'default': '{}'}),
'date_completed': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'feature_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"})
},
'sentry.file': {
'Meta': {'object_name': 'File'},
'blob': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'legacy_blob'", 'null': 'True', 'to': "orm['sentry.FileBlob']"}),
'blobs': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sentry.FileBlob']", 'through': "orm['sentry.FileBlobIndex']", 'symmetrical': 'False'}),
'checksum': ('django.db.models.fields.CharField', [], {'max_length': '40', 'null': 'True', 'db_index': 'True'}),
'headers': ('sentry.db.models.fields.jsonfield.JSONField', [], {'default': '{}'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'path': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'size': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'timestamp': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '64'})
},
'sentry.fileblob': {
'Meta': {'object_name': 'FileBlob'},
'checksum': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '40'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'path': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'size': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'timestamp': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'})
},
'sentry.fileblobindex': {
'Meta': {'unique_together': "(('file', 'blob', 'offset'),)", 'object_name': 'FileBlobIndex'},
'blob': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.FileBlob']"}),
'file': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.File']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'offset': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {})
},
'sentry.fileblobowner': {
'Meta': {'unique_together': "(('blob', 'organization'),)", 'object_name': 'FileBlobOwner'},
'blob': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.FileBlob']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"})
},
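        # Note: 'sentry.group' still maps to the pre-rename 'sentry_groupedmessage' table.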
'sentry.group': {
'Meta': {'unique_together': "(('project', 'short_id'),)", 'object_name': 'Group', 'db_table': "'sentry_groupedmessage'", 'index_together': "(('project', 'first_release'),)"},
'active_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'culprit': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'db_column': "'view'", 'blank': 'True'}),
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {'null': 'True', 'blank': 'True'}),
'first_release': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Release']", 'null': 'True', 'on_delete': 'models.PROTECT'}),
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_public': ('django.db.models.fields.NullBooleanField', [], {'default': 'False', 'null': 'True', 'blank': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'level': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '40', 'db_index': 'True', 'blank': 'True'}),
'logger': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '64', 'db_index': 'True', 'blank': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'num_comments': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'null': 'True'}),
'platform': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'resolved_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'score': ('sentry.db.models.fields.bounded.BoundedIntegerField', [], {'default': '0'}),
'short_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'time_spent_count': ('sentry.db.models.fields.bounded.BoundedIntegerField', [], {'default': '0'}),
'time_spent_total': ('sentry.db.models.fields.bounded.BoundedIntegerField', [], {'default': '0'}),
'times_seen': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '1', 'db_index': 'True'})
},
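        # Note: the 'sentry_groupasignee' db_table below preserves a historical
        # misspelling; the frozen definition keeps it as-is, since correcting the
        # name would require a schema-altering migration of its own.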
'sentry.groupassignee': {
'Meta': {'object_name': 'GroupAssignee', 'db_table': "'sentry_groupasignee'"},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'assignee_set'", 'unique': 'True', 'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'assignee_set'", 'to': "orm['sentry.Project']"}),
'team': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Team']", 'null': 'True'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'sentry_assignee_set'", 'null': 'True', 'to': "orm['sentry.User']"})
},
'sentry.groupbookmark': {
'Meta': {'unique_together': "(('project', 'user', 'group'),)", 'object_name': 'GroupBookmark'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'bookmark_set'", 'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'bookmark_set'", 'to': "orm['sentry.Project']"}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'sentry_bookmark_set'", 'to': "orm['sentry.User']"})
},
'sentry.groupcommitresolution': {
'Meta': {'unique_together': "(('group_id', 'commit_id'),)", 'object_name': 'GroupCommitResolution'},
'commit_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'group_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'})
},
'sentry.groupemailthread': {
'Meta': {'unique_together': "(('email', 'group'), ('email', 'msgid'))", 'object_name': 'GroupEmailThread'},
'date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'groupemail_set'", 'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'msgid': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'groupemail_set'", 'to': "orm['sentry.Project']"})
},
'sentry.groupenvironment': {
'Meta': {'unique_together': "[('group_id', 'environment_id')]", 'object_name': 'GroupEnvironment', 'index_together': "[('environment_id', 'first_release_id')]"},
'environment_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'first_release_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'group_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'})
},
'sentry.grouphash': {
'Meta': {'unique_together': "(('project', 'hash'),)", 'object_name': 'GroupHash'},
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'null': 'True'}),
'group_tombstone_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True', 'db_index': 'True'}),
'hash': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'state': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'})
},
'sentry.grouplink': {
'Meta': {'unique_together': "(('group_id', 'linked_type', 'linked_id'),)", 'object_name': 'GroupLink'},
'data': ('sentry.db.models.fields.jsonfield.JSONField', [], {'default': '{}'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'group_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'linked_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {}),
'linked_type': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '1'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'db_index': 'True'}),
'relationship': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '2'})
},
'sentry.groupmeta': {
'Meta': {'unique_together': "(('group', 'key'),)", 'object_name': 'GroupMeta'},
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'value': ('django.db.models.fields.TextField', [], {})
},
'sentry.groupredirect': {
'Meta': {'object_name': 'GroupRedirect'},
'group_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'previous_group_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'unique': 'True'})
},
'sentry.grouprelease': {
'Meta': {'unique_together': "(('group_id', 'release_id', 'environment'),)", 'object_name': 'GroupRelease'},
'environment': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '64'}),
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'group_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'release_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'})
},
'sentry.groupresolution': {
'Meta': {'object_name': 'GroupResolution'},
'actor_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'unique': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'release': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Release']"}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}),
'type': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'})
},
'sentry.grouprulestatus': {
'Meta': {'unique_together': "(('rule', 'group'),)", 'object_name': 'GroupRuleStatus'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_active': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'rule': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Rule']"}),
'status': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'})
},
'sentry.groupseen': {
'Meta': {'unique_together': "(('user', 'group'),)", 'object_name': 'GroupSeen'},
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'db_index': 'False'})
},
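        # Note: the literal 'uuid' default below is an artifact of South's ORM
        # freezing: a callable default (presumably uuid4().hex) is captured as the
        # value it happened to return at freeze time and is never used at runtime.
        # The same applies to the frozen 'guid', 'date_scheduled', 'secret', and
        # 'validation_hash' defaults further down.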
'sentry.groupshare': {
'Meta': {'object_name': 'GroupShare'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'unique': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'null': 'True'}),
'uuid': ('django.db.models.fields.CharField', [], {'default': "'c5af89218547415b95ca67d6861861d3'", 'unique': 'True', 'max_length': '32'})
},
'sentry.groupsnooze': {
'Meta': {'object_name': 'GroupSnooze'},
'actor_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'count': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'unique': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'state': ('sentry.db.models.fields.jsonfield.JSONField', [], {'null': 'True'}),
'until': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'user_count': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'user_window': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'window': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'})
},
'sentry.groupsubscription': {
'Meta': {'unique_together': "(('group', 'user'),)", 'object_name': 'GroupSubscription'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'subscription_set'", 'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'subscription_set'", 'to': "orm['sentry.Project']"}),
'reason': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.grouptagkey': {
'Meta': {'unique_together': "(('project_id', 'group_id', 'key'),)", 'object_name': 'GroupTagKey'},
'group_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True', 'db_index': 'True'}),
'values_seen': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'})
},
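        # Note: GroupTagValue maps to the legacy 'sentry_messagefiltervalue' table.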
'sentry.grouptagvalue': {
'Meta': {'unique_together': "(('group_id', 'key', 'value'),)", 'object_name': 'GroupTagValue', 'db_table': "'sentry_messagefiltervalue'", 'index_together': "(('project_id', 'key', 'value', 'last_seen'),)"},
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'group_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True', 'db_index': 'True'}),
'times_seen': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'sentry.grouptombstone': {
'Meta': {'object_name': 'GroupTombstone'},
'actor_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'culprit': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {'null': 'True', 'blank': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'level': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '40', 'blank': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'previous_group_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'unique': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"})
},
'sentry.identity': {
'Meta': {'unique_together': "(('idp', 'external_id'),)", 'object_name': 'Identity'},
'data': ('sentry.db.models.fields.encrypted.EncryptedJsonField', [], {'default': '{}'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_verified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'external_id': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'idp': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.IdentityProvider']"}),
'scopes': ('sentry.db.models.fields.array.ArrayField', [], {'of': ('django.db.models.fields.TextField', [], {})}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.identityprovider': {
'Meta': {'unique_together': "(('type', 'organization'),)", 'object_name': 'IdentityProvider'},
'config': ('sentry.db.models.fields.encrypted.EncryptedJsonField', [], {'default': '{}'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '64'})
},
'sentry.integration': {
'Meta': {'unique_together': "(('provider', 'external_id'),)", 'object_name': 'Integration'},
'external_id': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'metadata': ('sentry.db.models.fields.encrypted.EncryptedJsonField', [], {'default': '{}'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'organizations': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'integrations'", 'symmetrical': 'False', 'through': "orm['sentry.OrganizationIntegration']", 'to': "orm['sentry.Organization']"}),
'projects': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'integrations'", 'symmetrical': 'False', 'through': "orm['sentry.ProjectIntegration']", 'to': "orm['sentry.Project']"}),
'provider': ('django.db.models.fields.CharField', [], {'max_length': '64'})
},
'sentry.lostpasswordhash': {
'Meta': {'object_name': 'LostPasswordHash'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'hash': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'unique': 'True'})
},
'sentry.option': {
'Meta': {'object_name': 'Option'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '64'}),
'last_updated': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'value': ('sentry.db.models.fields.encrypted.EncryptedPickledObjectField', [], {})
},
'sentry.organization': {
'Meta': {'object_name': 'Organization'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'default_role': ('django.db.models.fields.CharField', [], {'default': "'member'", 'max_length': '32'}),
'flags': ('django.db.models.fields.BigIntegerField', [], {'default': '1'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'members': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'org_memberships'", 'symmetrical': 'False', 'through': "orm['sentry.OrganizationMember']", 'to': "orm['sentry.User']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'})
},
'sentry.organizationaccessrequest': {
'Meta': {'unique_together': "(('team', 'member'),)", 'object_name': 'OrganizationAccessRequest'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'member': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.OrganizationMember']"}),
'team': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Team']"})
},
'sentry.organizationavatar': {
'Meta': {'object_name': 'OrganizationAvatar'},
'avatar_type': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
'file': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.File']", 'unique': 'True', 'null': 'True', 'on_delete': 'models.SET_NULL'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32', 'db_index': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'avatar'", 'unique': 'True', 'to': "orm['sentry.Organization']"})
},
'sentry.organizationintegration': {
'Meta': {'unique_together': "(('organization', 'integration'),)", 'object_name': 'OrganizationIntegration'},
'config': ('sentry.db.models.fields.encrypted.EncryptedJsonField', [], {'default': '{}'}),
'default_auth_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True', 'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'integration': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Integration']"}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"})
},
'sentry.organizationmember': {
'Meta': {'unique_together': "(('organization', 'user'), ('organization', 'email'))", 'object_name': 'OrganizationMember'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
'flags': ('django.db.models.fields.BigIntegerField', [], {'default': '0'}),
'has_global_access': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'member_set'", 'to': "orm['sentry.Organization']"}),
'role': ('django.db.models.fields.CharField', [], {'default': "'member'", 'max_length': '32'}),
'teams': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sentry.Team']", 'symmetrical': 'False', 'through': "orm['sentry.OrganizationMemberTeam']", 'blank': 'True'}),
'token': ('django.db.models.fields.CharField', [], {'max_length': '64', 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'type': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '50', 'blank': 'True'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'blank': 'True', 'related_name': "'sentry_orgmember_set'", 'null': 'True', 'to': "orm['sentry.User']"})
},
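        # Note: unlike most models here, OrganizationMemberTeam (and User, below)
        # keeps a 32-bit BoundedAutoField primary key rather than the 64-bit
        # BoundedBigAutoField used elsewhere.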
'sentry.organizationmemberteam': {
'Meta': {'unique_together': "(('team', 'organizationmember'),)", 'object_name': 'OrganizationMemberTeam', 'db_table': "'sentry_organizationmember_teams'"},
'id': ('sentry.db.models.fields.bounded.BoundedAutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'organizationmember': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.OrganizationMember']"}),
'team': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Team']"})
},
'sentry.organizationonboardingtask': {
'Meta': {'unique_together': "(('organization', 'task'),)", 'object_name': 'OrganizationOnboardingTask'},
'data': ('sentry.db.models.fields.jsonfield.JSONField', [], {'default': '{}'}),
'date_completed': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'project_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True', 'blank': 'True'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'task': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'null': 'True'})
},
'sentry.organizationoption': {
'Meta': {'unique_together': "(('organization', 'key'),)", 'object_name': 'OrganizationOption', 'db_table': "'sentry_organizationoptions'"},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'value': ('sentry.db.models.fields.encrypted.EncryptedPickledObjectField', [], {})
},
'sentry.processingissue': {
'Meta': {'unique_together': "(('project', 'checksum', 'type'),)", 'object_name': 'ProcessingIssue'},
'checksum': ('django.db.models.fields.CharField', [], {'max_length': '40', 'db_index': 'True'}),
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '30'})
},
'sentry.project': {
'Meta': {'unique_together': "(('organization', 'slug'),)", 'object_name': 'Project'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'first_event': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'flags': ('django.db.models.fields.BigIntegerField', [], {'default': '0', 'null': 'True'}),
'forced_color': ('django.db.models.fields.CharField', [], {'max_length': '6', 'null': 'True', 'blank': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'platform': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'public': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'null': 'True'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'teams': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'teams'", 'symmetrical': 'False', 'through': "orm['sentry.ProjectTeam']", 'to': "orm['sentry.Team']"})
},
'sentry.projectbookmark': {
'Meta': {'unique_together': "(('project_id', 'user'),)", 'object_name': 'ProjectBookmark'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True', 'blank': 'True'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.projectdsymfile': {
'Meta': {'unique_together': "(('project', 'uuid'),)", 'object_name': 'ProjectDSymFile'},
'cpu_name': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'file': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.File']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'object_name': ('django.db.models.fields.TextField', [], {}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'uuid': ('django.db.models.fields.CharField', [], {'max_length': '36'})
},
'sentry.projectintegration': {
'Meta': {'unique_together': "(('project', 'integration'),)", 'object_name': 'ProjectIntegration'},
'config': ('sentry.db.models.fields.encrypted.EncryptedJsonField', [], {'default': '{}'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'integration': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Integration']"}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"})
},
'sentry.projectkey': {
'Meta': {'object_name': 'ProjectKey'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'key_set'", 'to': "orm['sentry.Project']"}),
'public_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True'}),
'rate_limit_count': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'rate_limit_window': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'roles': ('django.db.models.fields.BigIntegerField', [], {'default': '1'}),
'secret_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'})
},
'sentry.projectoption': {
'Meta': {'unique_together': "(('project', 'key'),)", 'object_name': 'ProjectOption', 'db_table': "'sentry_projectoptions'"},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'value': ('sentry.db.models.fields.encrypted.EncryptedPickledObjectField', [], {})
},
'sentry.projectownership': {
'Meta': {'object_name': 'ProjectOwnership'},
'date_created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'fallthrough': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'last_updated': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'unique': 'True'}),
'raw': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'schema': ('sentry.db.models.fields.jsonfield.JSONField', [], {'null': 'True'})
},
'sentry.projectplatform': {
'Meta': {'unique_together': "(('project_id', 'platform'),)", 'object_name': 'ProjectPlatform'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'platform': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {})
},
'sentry.projectsymcachefile': {
'Meta': {'unique_together': "(('project', 'dsym_file'),)", 'object_name': 'ProjectSymCacheFile'},
'cache_file': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.File']"}),
'checksum': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'dsym_file': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.ProjectDSymFile']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'version': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {})
},
'sentry.projectteam': {
'Meta': {'unique_together': "(('project', 'team'),)", 'object_name': 'ProjectTeam'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'team': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Team']"})
},
'sentry.pullrequest': {
'Meta': {'unique_together': "(('repository_id', 'key'),)", 'object_name': 'PullRequest', 'db_table': "'sentry_pull_request'", 'index_together': "(('repository_id', 'date_added'),)"},
'author': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.CommitAuthor']", 'null': 'True'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'merge_commit_sha': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'message': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'organization_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'repository_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'title': ('django.db.models.fields.TextField', [], {'null': 'True'})
},
'sentry.rawevent': {
'Meta': {'unique_together': "(('project', 'event_id'),)", 'object_name': 'RawEvent'},
'data': ('sentry.db.models.fields.node.NodeField', [], {'null': 'True', 'blank': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"})
},
'sentry.release': {
'Meta': {'unique_together': "(('organization', 'version'),)", 'object_name': 'Release'},
'authors': ('sentry.db.models.fields.array.ArrayField', [], {'of': ('django.db.models.fields.TextField', [], {})}),
'commit_count': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'data': ('sentry.db.models.fields.jsonfield.JSONField', [], {'default': '{}'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_released': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'date_started': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_commit_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'last_deploy_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'new_groups': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'owner': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'null': 'True', 'blank': 'True'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'projects': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'releases'", 'symmetrical': 'False', 'through': "orm['sentry.ReleaseProject']", 'to': "orm['sentry.Project']"}),
'ref': ('django.db.models.fields.CharField', [], {'max_length': '250', 'null': 'True', 'blank': 'True'}),
'total_deploys': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'version': ('django.db.models.fields.CharField', [], {'max_length': '250'})
},
'sentry.releasecommit': {
'Meta': {'unique_together': "(('release', 'commit'), ('release', 'order'))", 'object_name': 'ReleaseCommit'},
'commit': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Commit']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'order': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'organization_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'release': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Release']"})
},
'sentry.releaseenvironment': {
'Meta': {'unique_together': "(('organization_id', 'release_id', 'environment_id'),)", 'object_name': 'ReleaseEnvironment', 'db_table': "'sentry_environmentrelease'"},
'environment_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'organization_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'release_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'})
},
'sentry.releasefile': {
'Meta': {'unique_together': "(('release', 'ident'),)", 'object_name': 'ReleaseFile'},
'dist': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Distribution']", 'null': 'True'}),
'file': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.File']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'name': ('django.db.models.fields.TextField', [], {}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'project_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'release': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Release']"})
},
'sentry.releaseheadcommit': {
'Meta': {'unique_together': "(('repository_id', 'release'),)", 'object_name': 'ReleaseHeadCommit'},
'commit': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Commit']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'organization_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'release': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Release']"}),
'repository_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {})
},
'sentry.releaseproject': {
'Meta': {'unique_together': "(('project', 'release'),)", 'object_name': 'ReleaseProject', 'db_table': "'sentry_release_project'"},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'new_groups': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'null': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'release': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Release']"})
},
'sentry.releaseprojectenvironment': {
'Meta': {'unique_together': "(('project', 'release', 'environment'),)", 'object_name': 'ReleaseProjectEnvironment'},
'environment': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Environment']"}),
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'new_issues_count': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'release': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Release']"})
},
'sentry.repository': {
'Meta': {'unique_together': "(('organization_id', 'name'), ('organization_id', 'provider', 'external_id'))", 'object_name': 'Repository'},
'config': ('sentry.db.models.fields.jsonfield.JSONField', [], {'default': '{}'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'external_id': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'integration_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True', 'db_index': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'organization_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'provider': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True'})
},
'sentry.reprocessingreport': {
'Meta': {'unique_together': "(('project', 'event_id'),)", 'object_name': 'ReprocessingReport'},
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"})
},
'sentry.rule': {
'Meta': {'object_name': 'Rule'},
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'environment_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'})
},
'sentry.savedsearch': {
'Meta': {'unique_together': "(('project', 'name'),)", 'object_name': 'SavedSearch'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_default': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'owner': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'null': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'query': ('django.db.models.fields.TextField', [], {})
},
'sentry.savedsearchuserdefault': {
'Meta': {'unique_together': "(('project', 'user'),)", 'object_name': 'SavedSearchUserDefault', 'db_table': "'sentry_savedsearch_userdefault'"},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'savedsearch': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.SavedSearch']"}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.scheduleddeletion': {
'Meta': {'unique_together': "(('app_label', 'model_name', 'object_id'),)", 'object_name': 'ScheduledDeletion'},
'aborted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'actor_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True'}),
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'data': ('sentry.db.models.fields.jsonfield.JSONField', [], {'default': '{}'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_scheduled': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2018, 4, 1, 0, 0)'}),
'guid': ('django.db.models.fields.CharField', [], {'default': "'2fea61aedea043f7a8361399ac14f3e9'", 'unique': 'True', 'max_length': '32'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'in_progress': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'model_name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'object_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {})
},
'sentry.scheduledjob': {
'Meta': {'object_name': 'ScheduledJob'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_scheduled': ('django.db.models.fields.DateTimeField', [], {}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'payload': ('sentry.db.models.fields.jsonfield.JSONField', [], {'default': '{}'})
},
'sentry.servicehook': {
'Meta': {'object_name': 'ServiceHook'},
'actor_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'application': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.ApiApplication']", 'null': 'True'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'events': ('sentry.db.models.fields.array.ArrayField', [], {'of': ('django.db.models.fields.TextField', [], {})}),
'guid': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'secret': ('sentry.db.models.fields.encrypted.EncryptedTextField', [], {'default': "'ca211259db6646bd946e927ddc6f25ae2f368f2e5e30423cb92559066ca25064'"}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '512'}),
'version': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'})
},
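        # Note: tag storage likewise keeps its legacy names: TagKey maps to
        # 'sentry_filterkey' and TagValue to 'sentry_filtervalue'.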
'sentry.tagkey': {
'Meta': {'unique_together': "(('project_id', 'key'),)", 'object_name': 'TagKey', 'db_table': "'sentry_filterkey'"},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}),
'values_seen': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'})
},
'sentry.tagvalue': {
'Meta': {'unique_together': "(('project_id', 'key', 'value'),)", 'object_name': 'TagValue', 'db_table': "'sentry_filtervalue'", 'index_together': "(('project_id', 'key', 'last_seen'),)"},
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {'null': 'True', 'blank': 'True'}),
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True', 'db_index': 'True'}),
'times_seen': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'sentry.team': {
'Meta': {'unique_together': "(('organization', 'slug'),)", 'object_name': 'Team'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'})
},
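        # Note: 'sentry.user' reuses the 'auth_user' table (a holdover from
        # django.contrib.auth), and its 'name' field is stored in the old
        # 'first_name' column.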
'sentry.user': {
'Meta': {'object_name': 'User', 'db_table': "'auth_user'"},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedAutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_managed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_password_expired': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_active': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_password_change': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'db_column': "'first_name'", 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'session_nonce': ('django.db.models.fields.CharField', [], {'max_length': '12', 'null': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128'})
},
'sentry.useravatar': {
'Meta': {'object_name': 'UserAvatar'},
'avatar_type': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
'file': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.File']", 'unique': 'True', 'null': 'True', 'on_delete': 'models.SET_NULL'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32', 'db_index': 'True'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'avatar'", 'unique': 'True', 'to': "orm['sentry.User']"})
},
'sentry.useremail': {
'Meta': {'unique_together': "(('user', 'email'),)", 'object_name': 'UserEmail'},
'date_hash_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_verified': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'emails'", 'to': "orm['sentry.User']"}),
'validation_hash': ('django.db.models.fields.CharField', [], {'default': "u'eX2VjbitpNR9X4yjNQaZ6RVS6jE590tU'", 'max_length': '32'})
},
'sentry.userip': {
'Meta': {'unique_together': "(('user', 'ip_address'),)", 'object_name': 'UserIP'},
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ip_address': ('django.db.models.fields.GenericIPAddressField', [], {'max_length': '39'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.useroption': {
'Meta': {'unique_together': "(('user', 'project', 'key'), ('user', 'organization', 'key'))", 'object_name': 'UserOption'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']", 'null': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"}),
'value': ('sentry.db.models.fields.encrypted.EncryptedPickledObjectField', [], {})
},
'sentry.userpermission': {
'Meta': {'unique_together': "(('user', 'permission'),)", 'object_name': 'UserPermission'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'permission': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.userreport': {
'Meta': {'unique_together': "(('project', 'event_id'),)", 'object_name': 'UserReport', 'index_together': "(('project', 'event_id'), ('project', 'date_added'))"},
'comments': ('django.db.models.fields.TextField', [], {}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'environment': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Environment']", 'null': 'True'}),
'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'event_user_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"})
},
'sentry.versiondsymfile': {
'Meta': {'unique_together': "(('dsym_file', 'version', 'build'),)", 'object_name': 'VersionDSymFile'},
'build': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'dsym_app': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.DSymApp']"}),
'dsym_file': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.ProjectDSymFile']", 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'version': ('django.db.models.fields.CharField', [], {'max_length': '32'})
}
}
complete_apps = ['sentry']
|
{
"content_hash": "70e65c5cc98a11c17bad9b67bf33f487",
"timestamp": "",
"source": "github",
"line_count": 1095,
"max_line_length": 233,
"avg_line_length": 91.90593607305937,
"alnum_prop": 0.5796476445045063,
"repo_name": "mvaled/sentry",
"id": "38e056c8e6116e5a17d31e80750e660d009f4cb4",
"size": "100661",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/sentry/south_migrations/0396_auto__del_field_project_team.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "226439"
},
{
"name": "Dockerfile",
"bytes": "6431"
},
{
"name": "HTML",
"bytes": "173429"
},
{
"name": "JavaScript",
"bytes": "9314175"
},
{
"name": "Lua",
"bytes": "65885"
},
{
"name": "Makefile",
"bytes": "9225"
},
{
"name": "Python",
"bytes": "50385401"
},
{
"name": "Ruby",
"bytes": "168"
},
{
"name": "Shell",
"bytes": "5685"
},
{
"name": "TypeScript",
"bytes": "773664"
}
],
"symlink_target": ""
}
|
import pytest
from pygelf import GelfTcpHandler, GelfUdpHandler, GelfHttpHandler, GelfTlsHandler, GelfHttpsHandler
from tests.helper import logger, get_unique_message, log_warning
STATIC_FIELDS = {
'_ozzy': 'diary of a madman',
'_van_halen': 1984,
'_id': 42
}
@pytest.fixture(params=[
GelfTcpHandler(host='127.0.0.1', port=12201, **STATIC_FIELDS),
GelfUdpHandler(host='127.0.0.1', port=12202, **STATIC_FIELDS),
GelfUdpHandler(host='127.0.0.1', port=12202, compress=False, **STATIC_FIELDS),
GelfHttpHandler(host='127.0.0.1', port=12203, **STATIC_FIELDS),
GelfHttpHandler(host='127.0.0.1', port=12203, compress=False, **STATIC_FIELDS),
GelfTlsHandler(host='127.0.0.1', port=12204, **STATIC_FIELDS),
GelfTlsHandler(host='127.0.0.1', port=12204, validate=True, ca_certs='tests/config/cert.pem', **STATIC_FIELDS),
GelfTcpHandler(host='127.0.0.1', port=12201, static_fields=STATIC_FIELDS, _ozzy='billie jean'),
GelfUdpHandler(host='127.0.0.1', port=12202, static_fields=STATIC_FIELDS, _ozzy='billie jean'),
GelfUdpHandler(host='127.0.0.1', port=12202, compress=False, static_fields=STATIC_FIELDS, _ozzy='billie jean'),
GelfHttpHandler(host='127.0.0.1', port=12203, static_fields=STATIC_FIELDS, _ozzy='billie jean'),
GelfHttpHandler(host='127.0.0.1', port=12203, compress=False, static_fields=STATIC_FIELDS, _ozzy='billie jean'),
GelfTlsHandler(host='127.0.0.1', port=12204, static_fields=STATIC_FIELDS),
GelfHttpsHandler(host='127.0.0.1', port=12205, validate=False, static_fields=STATIC_FIELDS, _ozzy='billie jean'),
GelfTlsHandler(host='127.0.0.1', port=12204, validate=True, ca_certs='tests/config/cert.pem', static_fields=STATIC_FIELDS, _ozzy='billie jean'),
])
def handler(request):
return request.param
def test_static_fields(logger):
message = get_unique_message()
graylog_response = log_warning(logger, message, fields=['ozzy', 'van_halen'])
assert graylog_response['message'] == message
assert graylog_response['ozzy'] == 'diary of a madman'
assert graylog_response['van_halen'] == 1984
assert graylog_response['_id'] != 42
assert 'id' not in graylog_response
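# --- Editor's sketch (not part of the original test) ------------------------
# Minimal illustration of attaching one of the handlers above to a stdlib
# logger. Host/port are the TCP fixture defaults and assume a local Graylog
# input is listening; nothing here is exercised by pytest.
if __name__ == '__main__':
    import logging
    example_logger = logging.getLogger('pygelf-example')
    example_logger.addHandler(
        GelfTcpHandler(host='127.0.0.1', port=12201, **STATIC_FIELDS))
    # The message arrives with 'ozzy' and 'van_halen' fields; the reserved
    # '_id' field is ignored by the server, as the assertions above verify.
    example_logger.warning('hello graylog')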
|
{
"content_hash": "d2cfda699ebbfecd45b3b0bb87a5efb9",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 148,
"avg_line_length": 53.390243902439025,
"alnum_prop": 0.706258565555048,
"repo_name": "keeprocking/pygelf",
"id": "fb4eb08001ee27cbf88663721120e7bb17a4ba7f",
"size": "2189",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_static_fields.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "27023"
},
{
"name": "Shell",
"bytes": "3310"
}
],
"symlink_target": ""
}
|
import os
import sys
import fnmatch
import codecs
if len(sys.argv) < 3:
print os.path.basename(__file__), 'target_path', 'framework_names...'
sys.exit(1)
FRAMEWORK_PATH = os.path.join(os.path.dirname(__file__), 'Frameworks')
target_path = sys.argv[1]
print 'Patching #import in *.h/*.m from', target_path
header_map = {}
for framework in sys.argv[2:]:
for root, dirnames, filenames in os.walk(os.path.join(FRAMEWORK_PATH, framework+'.framework', 'Headers')):
header_map[framework] = filenames
for framework in header_map.keys():
print framework+'.framework'
for header in header_map[framework]:
print '-', header
for root, dirnames, filenames in os.walk(os.path.join(os.path.dirname(__file__), target_path)):
for fname in [fname for fname in filenames if fname.endswith('.h') or fname.endswith('.m')]:
filepath = os.path.join(root, fname)
with codecs.open(filepath, 'r', 'utf-8') as input:
content = input.read()
for framework, headers in header_map.iteritems():
for header in headers:
content = content.replace('"'+header+'"', '<'+framework+'/'+header+'>')
with codecs.open(filepath, 'w', 'utf-8') as output:
output.write(content)
print 'patched', filepath
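# Example invocation (hypothetical target and framework names), matching the
# usage string printed above:
#
#     python fix_imports.py MyApp CubieSDK
#
# This walks MyApp/**/*.h and *.m and rewrites #import "Header.h" into
# #import <CubieSDK/Header.h> for every header found under
# Frameworks/CubieSDK.framework/Headers.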
|
{
"content_hash": "8c54d5dbc06609151533a0d6a57f1bcd",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 109,
"avg_line_length": 36.74285714285714,
"alnum_prop": 0.6430793157076206,
"repo_name": "cubie-api/cubie-sdk-ios",
"id": "a72e595aff5556276fb1e63db03d2a4dedade0b3",
"size": "1286",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "fix_imports.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "5006"
},
{
"name": "C++",
"bytes": "4467"
},
{
"name": "Objective-C",
"bytes": "313378"
},
{
"name": "Python",
"bytes": "1927"
},
{
"name": "Ruby",
"bytes": "947"
},
{
"name": "Shell",
"bytes": "2423"
}
],
"symlink_target": ""
}
|
import os
import sys
import tornado.auth
import tornado.escape
import tornado.ioloop
#import tornado.options
import tornado.web
import tornado.httpserver
import tornado.httputil
#import tornado.gen
import dill as pickle
import fnmatch
import importlib.util
import tarfile
import subprocess
import logging
from tornado.options import define, options
from prometheus_client import start_http_server, Summary
define("PIO_MODEL_STORE_HOME", default="model_store", help="path to model_store", type=str)
define("PIO_MODEL_TYPE", default="python3", help="prediction model type", type=str)
define("PIO_MODEL_NAMESPACE", default="default", help="prediction model namespace", type=str)
define("PIO_MODEL_NAME", default="scikit_balancescale", help="prediction model name", type=str)
define("PIO_MODEL_VERSION", default="v0", help="prediction model version", type=str)
define("PIO_MODEL_SERVER_PORT", default="9876", help="tornado http server listen port", type=int)
define("PIO_MODEL_SERVER_PROMETHEUS_PORT", default="8080", help="port to run the prometheus http metrics server on", type=int)
# Create a metric to track time spent and requests made.
REQUEST_TIME = Summary('request_processing_seconds', 'Model Server: Time spent processing request')
REQUEST_TIME.observe(1.0) # Observe 1.0 (seconds in this case)
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
logger.addHandler(ch)
class Application(tornado.web.Application):
def __init__(self):
handlers = [
(r"/", IndexHandler),
# url: /v1/model/predict/python3/$PIO_MODEL_NAMESPACE/$PIO_MODEL_NAME/$PIO_MODEL_VERSION/
(r"/v1/model/predict/([a-zA-Z\-0-9\.:,_]+)/([a-zA-Z\-0-9\.:,_]+)/([a-zA-Z\-0-9\.:,_]+)/([a-zA-Z\-0-9\.:,_]+)", ModelPredictPython3Handler),
# TODO: Disable this if we're not explicitly in PIO_MODEL_ENVIRONMENT=dev mode
# url: /v1/model/deploy/python3/$PIO_MODEL_NAMESPACE/$PIO_MODEL_NAME/$PIO_MODEL_VERSION/
(r"/v1/model/deploy/([a-zA-Z\-0-9\.:,_]+)/([a-zA-Z\-0-9\.:,_]+)/([a-zA-Z\-0-9\.:,_]+)/([a-zA-Z\-0-9\.:,_]+)", ModelDeployPython3Handler),
]
settings = dict(
model_store_path=options.PIO_MODEL_STORE_HOME,
model_type=options.PIO_MODEL_TYPE,
model_namespace=options.PIO_MODEL_NAMESPACE,
model_name=options.PIO_MODEL_NAME,
model_version=options.PIO_MODEL_VERSION,
model_server_port=options.PIO_MODEL_SERVER_PORT,
model_server_prometheus_server_port=options.PIO_MODEL_SERVER_PROMETHEUS_PORT,
template_path=os.path.join(os.path.dirname(__file__), "templates"),
static_path=os.path.join(os.path.dirname(__file__), "static"),
debug=True,
autoescape=None,
)
tornado.web.Application.__init__(self, handlers, **settings)
def fallback(self):
logger.warn('Model Server Application fallback: %s' % self)
return 'fallback!'
class IndexHandler(tornado.web.RequestHandler):
@tornado.web.asynchronous
def get(self):
self.render("index.html")
class ModelPredictPython3Handler(tornado.web.RequestHandler):
registry = {}
@REQUEST_TIME.time()
@tornado.web.asynchronous
def post(self, model_type, model_namespace, model_name, model_version):
(_, model_key, model, transformers_module) = self.get_model_assets(model_type,
model_namespace,
model_name,
model_version)
transformed_inputs = transformers_module.transform_inputs(self.request.body)
outputs = model.predict(transformed_inputs)
transformed_outputs = transformers_module.transform_outputs(outputs)
self.write(transformed_outputs)
self.finish()
@REQUEST_TIME.time()
def get_model_assets(self, model_type, model_namespace, model_name, model_version):
model_key = '%s_%s_%s_%s' % (model_type, model_namespace, model_name, model_version)
if model_key in self.registry:
(model_file_absolute_path, model_key, model, transformers_module) = self.registry[model_key]
else:
model_base_path = os.path.join(self.settings['model_store_path'], model_type)
model_base_path = os.path.join(model_base_path, model_namespace)
model_base_path = os.path.join(model_base_path, model_name)
model_base_path = os.path.join(model_base_path, model_version)
model_filename = fnmatch.filter(os.listdir(model_base_path), "*.pkl")[0]
# Set absolute path to model directory
model_file_absolute_path = os.path.join(model_base_path, model_filename)
# Load pickled model from model directory
with open(model_file_absolute_path, 'rb') as model_file:
model = pickle.load(model_file)
# Load model_io_transformers from model directory
transformers_module_name = 'model_io_transformers'
transformers_source_path = os.path.join(model_base_path, '%s.py' % transformers_module_name)
spec = importlib.util.spec_from_file_location(transformers_module_name, transformers_source_path)
transformers_module = importlib.util.module_from_spec(spec)
spec.loader.exec_module(transformers_module)
self.registry[model_key] = (model_file_absolute_path, model_key, model, transformers_module)
return (model_file_absolute_path, model_key, model, transformers_module)
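# Expected model_store layout, inferred from the path joins in
# get_model_assets above (file names are illustrative, except
# model_io_transformers.py, which is looked up by name):
#
#   model_store/<type>/<namespace>/<name>/<version>/
#       model.pkl                  # first *.pkl match is unpickled
#       model_io_transformers.py   # must define transform_inputs() and
#                                  # transform_outputs()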
class ModelDeployPython3Handler(tornado.web.RequestHandler):
@REQUEST_TIME.time()
def post(self, model_type, model_namespace, model_name, model_version):
fileinfo = self.request.files['file'][0]
model_file_source_bundle_path = fileinfo['filename']
(_, filename) = os.path.split(model_file_source_bundle_path)
bundle_path = os.path.join(self.settings['model_store_path'], model_type)
bundle_path = os.path.join(bundle_path, model_namespace)
bundle_path = os.path.join(bundle_path, model_name)
bundle_path = os.path.join(bundle_path, model_version)
bundle_path_filename = os.path.join(bundle_path, filename)
try:
os.makedirs(bundle_path, exist_ok=True)
with open(bundle_path_filename, 'wb+') as fh:
fh.write(fileinfo['body'])
logger.info("'%s' uploaded '%s', saved as '%s'" %
( str(self.request.remote_ip),
str(filename),
bundle_path_filename) )
logger.info("Uploading and extracting bundle '%s' into '%s'..." % (filename, bundle_path))
with tarfile.open(bundle_path_filename, "r:gz") as tar:
tar.extractall(path=bundle_path)
logger.info('...Done!')
logger.info('Installing bundle and updating environment...\n')
completed_process = subprocess.run('cd %s && ./install.sh' % bundle_path,
timeout=600,
shell=True,
stdout=subprocess.PIPE)
logger.info('...Done!')
self.write('Model successfully deployed!')
except IOError as e:
logger.error('Failed to write file due to IOError %s' % str(e))
self.write('Failed to write file due to IOError %s' % str(e))
raise e
def write_error(self, status_code, **kwargs):
self.write('Error %s' % status_code)
if "exc_info" in kwargs:
self.write(", Exception: %s" % kwargs["exc_info"][0].__name__)
def main():
# Start up a web server to expose request made and time spent metrics to Prometheus
# TODO: Potentially expose metrics to Prometheus using the Tornado HTTP server as long as it's not blocking
# See the MetricsHandler class which provides a BaseHTTPRequestHandler
# https://github.com/prometheus/client_python/blob/ce5542bd8be2944a1898e9ac3d6e112662153ea4/prometheus_client/exposition.py#L79
logger.info("Starting Prometheus Http Server on port '%s'" % options.PIO_MODEL_SERVER_PROMETHEUS_PORT)
start_http_server(int(options.PIO_MODEL_SERVER_PROMETHEUS_PORT))
logger.info("Starting Model Predict and Deploy Http Server on port '%s'" % options.PIO_MODEL_SERVER_PORT)
http_server = tornado.httpserver.HTTPServer(Application())
http_server.listen(int(options.PIO_MODEL_SERVER_PORT))
tornado.ioloop.IOLoop.current().start()
if __name__ == '__main__':
main()
|
{
"content_hash": "34bb393c99c7d012f261aec54b844ad1",
"timestamp": "",
"source": "github",
"line_count": 180,
"max_line_length": 151,
"avg_line_length": 49.044444444444444,
"alnum_prop": 0.6315133665609425,
"repo_name": "shareactorIO/pipeline",
"id": "1cb90addc647efa972ffc6a4812f0fd311d4d199",
"size": "8852",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "prediction.ml/python3/src/main/python/model_server_python3.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "ApacheConf",
"bytes": "36325"
},
{
"name": "Batchfile",
"bytes": "63654"
},
{
"name": "C",
"bytes": "1759"
},
{
"name": "C++",
"bytes": "50538"
},
{
"name": "CSS",
"bytes": "548116"
},
{
"name": "Cuda",
"bytes": "12823"
},
{
"name": "Go",
"bytes": "9555"
},
{
"name": "Groovy",
"bytes": "24769"
},
{
"name": "HTML",
"bytes": "146027580"
},
{
"name": "Java",
"bytes": "109991"
},
{
"name": "JavaScript",
"bytes": "644060"
},
{
"name": "Jupyter Notebook",
"bytes": "17753504"
},
{
"name": "Makefile",
"bytes": "357"
},
{
"name": "PLSQL",
"bytes": "2470"
},
{
"name": "PLpgSQL",
"bytes": "3657"
},
{
"name": "Protocol Buffer",
"bytes": "692822"
},
{
"name": "Python",
"bytes": "844350"
},
{
"name": "Scala",
"bytes": "228848"
},
{
"name": "Shell",
"bytes": "176444"
},
{
"name": "XSLT",
"bytes": "80778"
}
],
"symlink_target": ""
}
|
"""
A connection to a hypervisor through libvirt.
Supports KVM, LXC, QEMU, UML, and XEN.
**Related Flags**
:driver_type: Libvirt domain type. Can be kvm, qemu, uml, xen (default: kvm).
:connection_uri: Override for the default libvirt URI (depends on
driver_type).
:disk_prefix: Override the default disk prefix for the devices
attached to a server.
:rescue_image_id: Rescue ami image (None = original image).
:rescue_kernel_id: Rescue aki image (None = original image).
:rescue_ramdisk_id: Rescue ari image (None = original image).
:injected_network_template: Template file for injected network
:allow_same_net_traffic: Whether to allow in project network traffic
"""
import errno
import eventlet
import functools
import glob
import mmap
import os
import shutil
import socket
import sys
import tempfile
import threading
import time
import uuid
from eventlet import greenio
from eventlet import greenthread
from eventlet import patcher
from eventlet import tpool
from eventlet import util as eventlet_util
from lxml import etree
from oslo.config import cfg
from nova.api.metadata import base as instance_metadata
from nova import block_device
from nova.compute import flavors
from nova.compute import power_state
from nova.compute import task_states
from nova.compute import utils as compute_utils
from nova.compute import vm_mode
from nova import context as nova_context
from nova import exception
from nova.image import glance
from nova.objects import flavor as flavor_obj
from nova.objects import instance as instance_obj
from nova.objects import service as service_obj
from nova.openstack.common import excutils
from nova.openstack.common import fileutils
from nova.openstack.common.gettextutils import _
from nova.openstack.common import importutils
from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
from nova.openstack.common import loopingcall
from nova.openstack.common import processutils
from nova.openstack.common import units
from nova.openstack.common import xmlutils
from nova.pci import pci_manager
from nova.pci import pci_utils
from nova.pci import pci_whitelist
from nova import rpc
from nova import utils
from nova import version
from nova.virt import configdrive
from nova.virt import cpu
from nova.virt.disk import api as disk
from nova.virt import driver
from nova.virt import event as virtevent
from nova.virt import firewall
from nova.virt.libvirt import blockinfo
from nova.virt.libvirt import config as vconfig
from nova.virt.libvirt import firewall as libvirt_firewall
from nova.virt.libvirt import imagebackend
from nova.virt.libvirt import imagecache
from nova.virt.libvirt import utils as libvirt_utils
from nova.virt import netutils
from nova.virt import watchdog_actions
from nova import volume
from nova.volume import encryptors
native_threading = patcher.original("threading")
native_Queue = patcher.original("Queue")
libvirt = None
LOG = logging.getLogger(__name__)
libvirt_opts = [
cfg.StrOpt('rescue_image_id',
help='Rescue ami image',
deprecated_group='DEFAULT'),
cfg.StrOpt('rescue_kernel_id',
help='Rescue aki image',
deprecated_group='DEFAULT'),
cfg.StrOpt('rescue_ramdisk_id',
help='Rescue ari image',
deprecated_group='DEFAULT'),
cfg.StrOpt('virt_type',
default='kvm',
help='Libvirt domain type (valid options are: '
'kvm, lxc, qemu, uml, xen)',
deprecated_group='DEFAULT',
deprecated_name='libvirt_type'),
cfg.StrOpt('connection_uri',
default='',
help='Override the default libvirt URI '
'(which is dependent on virt_type)',
deprecated_group='DEFAULT',
deprecated_name='libvirt_uri'),
cfg.BoolOpt('inject_password',
default=False,
help='Inject the admin password at boot time, '
'without an agent.',
deprecated_name='libvirt_inject_password',
deprecated_group='DEFAULT'),
cfg.BoolOpt('inject_key',
default=False,
help='Inject the ssh public key at boot time',
deprecated_name='libvirt_inject_key',
deprecated_group='DEFAULT'),
cfg.IntOpt('inject_partition',
default=-2,
help='The partition to inject to : '
'-2 => disable, -1 => inspect (libguestfs only), '
'0 => not partitioned, >0 => partition number',
deprecated_name='libvirt_inject_partition',
deprecated_group='DEFAULT'),
cfg.BoolOpt('use_usb_tablet',
default=True,
help='Sync virtual and real mouse cursors in Windows VMs',
deprecated_group='DEFAULT'),
cfg.StrOpt('live_migration_uri',
default="qemu+tcp://%s/system",
help='Migration target URI '
'(any included "%s" is replaced with '
'the migration target hostname)',
deprecated_group='DEFAULT'),
cfg.StrOpt('live_migration_flag',
default='VIR_MIGRATE_UNDEFINE_SOURCE, VIR_MIGRATE_PEER2PEER',
help='Migration flags to be set for live migration',
deprecated_group='DEFAULT'),
cfg.StrOpt('block_migration_flag',
default='VIR_MIGRATE_UNDEFINE_SOURCE, VIR_MIGRATE_PEER2PEER, '
'VIR_MIGRATE_NON_SHARED_INC',
help='Migration flags to be set for block migration',
deprecated_group='DEFAULT'),
cfg.IntOpt('live_migration_bandwidth',
default=0,
help='Maximum bandwidth to be used during migration, in Mbps',
deprecated_group='DEFAULT'),
cfg.StrOpt('snapshot_image_format',
help='Snapshot image format (valid options are : '
'raw, qcow2, vmdk, vdi). '
'Defaults to same as source image',
deprecated_group='DEFAULT'),
cfg.StrOpt('vif_driver',
default='nova.virt.libvirt.vif.LibvirtGenericVIFDriver',
               help='DEPRECATED. The libvirt VIF driver to configure the VIFs. '
'This option is deprecated and will be removed in the '
'Juno release.',
deprecated_name='libvirt_vif_driver',
deprecated_group='DEFAULT'),
cfg.ListOpt('volume_drivers',
default=[
'iscsi=nova.virt.libvirt.volume.LibvirtISCSIVolumeDriver',
'iser=nova.virt.libvirt.volume.LibvirtISERVolumeDriver',
'local=nova.virt.libvirt.volume.LibvirtVolumeDriver',
'fake=nova.virt.libvirt.volume.LibvirtFakeVolumeDriver',
'rbd=nova.virt.libvirt.volume.LibvirtNetVolumeDriver',
'sheepdog=nova.virt.libvirt.volume.LibvirtNetVolumeDriver',
'nfs=nova.virt.libvirt.volume.LibvirtNFSVolumeDriver',
'aoe=nova.virt.libvirt.volume.LibvirtAOEVolumeDriver',
'glusterfs='
'nova.virt.libvirt.volume.LibvirtGlusterfsVolumeDriver',
'fibre_channel=nova.virt.libvirt.volume.'
'LibvirtFibreChannelVolumeDriver',
'scality='
'nova.virt.libvirt.volume.LibvirtScalityVolumeDriver',
],
help='Libvirt handlers for remote volumes.',
deprecated_name='libvirt_volume_drivers',
deprecated_group='DEFAULT'),
cfg.StrOpt('disk_prefix',
help='Override the default disk prefix for the devices attached'
' to a server, which is dependent on virt_type. '
'(valid options are: sd, xvd, uvd, vd)',
deprecated_name='libvirt_disk_prefix',
deprecated_group='DEFAULT'),
cfg.IntOpt('wait_soft_reboot_seconds',
default=120,
help='Number of seconds to wait for instance to shut down after'
' soft reboot request is made. We fall back to hard reboot'
' if instance does not shutdown within this window.',
deprecated_name='libvirt_wait_soft_reboot_seconds',
deprecated_group='DEFAULT'),
cfg.StrOpt('cpu_mode',
help='Set to "host-model" to clone the host CPU feature flags; '
'to "host-passthrough" to use the host CPU model exactly; '
'to "custom" to use a named CPU model; '
'to "none" to not set any CPU model. '
'If virt_type="kvm|qemu", it will default to '
'"host-model", otherwise it will default to "none"',
deprecated_name='libvirt_cpu_mode',
deprecated_group='DEFAULT'),
cfg.StrOpt('cpu_model',
help='Set to a named libvirt CPU model (see names listed '
'in /usr/share/libvirt/cpu_map.xml). Only has effect if '
'cpu_mode="custom" and virt_type="kvm|qemu"',
deprecated_name='libvirt_cpu_model',
deprecated_group='DEFAULT'),
cfg.StrOpt('snapshots_directory',
default='$instances_path/snapshots',
help='Location where libvirt driver will store snapshots '
'before uploading them to image service',
deprecated_name='libvirt_snapshots_directory',
deprecated_group='DEFAULT'),
cfg.StrOpt('xen_hvmloader_path',
default='/usr/lib/xen/boot/hvmloader',
help='Location where the Xen hvmloader is kept',
deprecated_group='DEFAULT'),
cfg.ListOpt('disk_cachemodes',
default=[],
help='Specific cachemodes to use for different disk types '
'e.g: file=directsync,block=none',
deprecated_group='DEFAULT'),
cfg.StrOpt('rng_dev_path',
help='A path to a device that will be used as source of '
'entropy on the host. Permitted options are: '
'/dev/random or /dev/hwrng'),
]
CONF = cfg.CONF
CONF.register_opts(libvirt_opts, 'libvirt')
CONF.import_opt('host', 'nova.netconf')
CONF.import_opt('my_ip', 'nova.netconf')
CONF.import_opt('default_ephemeral_format', 'nova.virt.driver')
CONF.import_opt('use_cow_images', 'nova.virt.driver')
CONF.import_opt('live_migration_retry_count', 'nova.compute.manager')
CONF.import_opt('vncserver_proxyclient_address', 'nova.vnc')
CONF.import_opt('server_proxyclient_address', 'nova.spice', group='spice')
CONF.import_opt('vcpu_pin_set', 'nova.virt.cpu')
CONF.import_opt('vif_plugging_is_fatal', 'nova.virt.driver')
CONF.import_opt('vif_plugging_timeout', 'nova.virt.driver')
DEFAULT_FIREWALL_DRIVER = "%s.%s" % (
libvirt_firewall.__name__,
libvirt_firewall.IptablesFirewallDriver.__name__)
MAX_CONSOLE_BYTES = 100 * units.Ki
# The libvirt driver will prefix any disable reason codes with this string.
DISABLE_PREFIX = 'AUTO: '
# Disable reason for the service which was enabled or disabled without reason
DISABLE_REASON_UNDEFINED = 'None'
def patch_tpool_proxy():
"""eventlet.tpool.Proxy doesn't work with old-style class in __str__()
or __repr__() calls. See bug #962840 for details.
We perform a monkey patch to replace those two instance methods.
"""
def str_method(self):
return str(self._obj)
def repr_method(self):
return repr(self._obj)
tpool.Proxy.__str__ = str_method
tpool.Proxy.__repr__ = repr_method
patch_tpool_proxy()
VIR_DOMAIN_NOSTATE = 0
VIR_DOMAIN_RUNNING = 1
VIR_DOMAIN_BLOCKED = 2
VIR_DOMAIN_PAUSED = 3
VIR_DOMAIN_SHUTDOWN = 4
VIR_DOMAIN_SHUTOFF = 5
VIR_DOMAIN_CRASHED = 6
VIR_DOMAIN_PMSUSPENDED = 7
LIBVIRT_POWER_STATE = {
VIR_DOMAIN_NOSTATE: power_state.NOSTATE,
VIR_DOMAIN_RUNNING: power_state.RUNNING,
# NOTE(maoy): The DOMAIN_BLOCKED state is only valid in Xen.
# It means that the VM is running and the vCPU is idle. So,
# we map it to RUNNING
VIR_DOMAIN_BLOCKED: power_state.RUNNING,
VIR_DOMAIN_PAUSED: power_state.PAUSED,
# NOTE(maoy): The libvirt API doc says that DOMAIN_SHUTDOWN
# means the domain is being shut down. So technically the domain
# is still running. SHUTOFF is the real powered off state.
# But we will map both to SHUTDOWN anyway.
# http://libvirt.org/html/libvirt-libvirt.html
VIR_DOMAIN_SHUTDOWN: power_state.SHUTDOWN,
VIR_DOMAIN_SHUTOFF: power_state.SHUTDOWN,
VIR_DOMAIN_CRASHED: power_state.CRASHED,
VIR_DOMAIN_PMSUSPENDED: power_state.SUSPENDED,
}
MIN_LIBVIRT_VERSION = (0, 9, 6)
# When the above version matches/exceeds this version
# delete it & corresponding code using it
MIN_LIBVIRT_HOST_CPU_VERSION = (0, 9, 10)
MIN_LIBVIRT_DEVICE_CALLBACK_VERSION = (1, 1, 1)
# Live snapshot requirements
REQ_HYPERVISOR_LIVESNAPSHOT = "QEMU"
MIN_LIBVIRT_LIVESNAPSHOT_VERSION = (1, 0, 0)
MIN_QEMU_LIVESNAPSHOT_VERSION = (1, 3, 0)
# block size tuning requirements
MIN_LIBVIRT_BLOCKIO_VERSION = (0, 10, 2)
# BlockJobInfo management requirement
MIN_LIBVIRT_BLOCKJOBINFO_VERSION = (1, 1, 1)
def libvirt_error_handler(context, err):
# Just ignore instead of default outputting to stderr.
pass
class LibvirtDriver(driver.ComputeDriver):
capabilities = {
"has_imagecache": True,
"supports_recreate": True,
}
def __init__(self, virtapi, read_only=False):
super(LibvirtDriver, self).__init__(virtapi)
global libvirt
if libvirt is None:
libvirt = __import__('libvirt')
self._host_state = None
self._initiator = None
self._fc_wwnns = None
self._fc_wwpns = None
self._wrapped_conn = None
self._wrapped_conn_lock = threading.Lock()
self._caps = None
self._vcpu_total = 0
self.read_only = read_only
self.firewall_driver = firewall.load_driver(
DEFAULT_FIREWALL_DRIVER,
self.virtapi,
get_connection=self._get_connection)
vif_class = importutils.import_class(CONF.libvirt.vif_driver)
self.vif_driver = vif_class(self._get_connection)
self.volume_drivers = driver.driver_dict_from_config(
CONF.libvirt.volume_drivers, self)
self.dev_filter = pci_whitelist.get_pci_devices_filter()
self._event_queue = None
self._disk_cachemode = None
self.image_cache_manager = imagecache.ImageCacheManager()
self.image_backend = imagebackend.Backend(CONF.use_cow_images)
self.disk_cachemodes = {}
self.valid_cachemodes = ["default",
"none",
"writethrough",
"writeback",
"directsync",
"unsafe",
]
for mode_str in CONF.libvirt.disk_cachemodes:
disk_type, sep, cache_mode = mode_str.partition('=')
if cache_mode not in self.valid_cachemodes:
LOG.warn(_('Invalid cachemode %(cache_mode)s specified '
'for disk type %(disk_type)s.'),
{'cache_mode': cache_mode, 'disk_type': disk_type})
continue
self.disk_cachemodes[disk_type] = cache_mode
self._volume_api = volume.API()
@property
def disk_cachemode(self):
if self._disk_cachemode is None:
# We prefer 'none' for consistent performance, host crash
# safety & migration correctness by avoiding host page cache.
# Some filesystems (eg GlusterFS via FUSE) don't support
# O_DIRECT though. For those we fallback to 'writethrough'
# which gives host crash safety, and is safe for migration
# provided the filesystem is cache coherent (cluster filesystems
# typically are, but things like NFS are not).
self._disk_cachemode = "none"
if not self._supports_direct_io(CONF.instances_path):
self._disk_cachemode = "writethrough"
return self._disk_cachemode
@property
def host_state(self):
if not self._host_state:
self._host_state = HostState(self)
return self._host_state
def set_cache_mode(self, conf):
"""Set cache mode on LibvirtConfigGuestDisk object."""
try:
source_type = conf.source_type
driver_cache = conf.driver_cache
except AttributeError:
return
cache_mode = self.disk_cachemodes.get(source_type,
driver_cache)
conf.driver_cache = cache_mode
@staticmethod
def _has_min_version(conn, lv_ver=None, hv_ver=None, hv_type=None):
try:
if lv_ver is not None:
libvirt_version = conn.getLibVersion()
if libvirt_version < utils.convert_version_to_int(lv_ver):
return False
if hv_ver is not None:
hypervisor_version = conn.getVersion()
if hypervisor_version < utils.convert_version_to_int(hv_ver):
return False
if hv_type is not None:
hypervisor_type = conn.getType()
if hypervisor_type != hv_type:
return False
return True
except Exception:
return False
def has_min_version(self, lv_ver=None, hv_ver=None, hv_type=None):
return self._has_min_version(self._conn, lv_ver, hv_ver, hv_type)
def _native_thread(self):
"""Receives async events coming in from libvirtd.
This is a native thread which runs the default
libvirt event loop implementation. This processes
any incoming async events from libvirtd and queues
them for later dispatch. This thread is only
permitted to use libvirt python APIs, and the
driver.queue_event method. In particular any use
of logging is forbidden, since it will confuse
eventlet's greenthread integration
"""
while True:
libvirt.virEventRunDefaultImpl()
def _dispatch_thread(self):
"""Dispatches async events coming in from libvirtd.
This is a green thread which waits for events to
arrive from the libvirt event loop thread. This
then dispatches the events to the compute manager.
"""
while True:
self._dispatch_events()
@staticmethod
def _event_lifecycle_callback(conn, dom, event, detail, opaque):
"""Receives lifecycle events from libvirt.
NB: this method is executing in a native thread, not
an eventlet coroutine. It can only invoke other libvirt
APIs, or use self.queue_event(). Any use of logging APIs
in particular is forbidden.
"""
self = opaque
uuid = dom.UUIDString()
transition = None
if event == libvirt.VIR_DOMAIN_EVENT_STOPPED:
transition = virtevent.EVENT_LIFECYCLE_STOPPED
elif event == libvirt.VIR_DOMAIN_EVENT_STARTED:
transition = virtevent.EVENT_LIFECYCLE_STARTED
elif event == libvirt.VIR_DOMAIN_EVENT_SUSPENDED:
transition = virtevent.EVENT_LIFECYCLE_PAUSED
elif event == libvirt.VIR_DOMAIN_EVENT_RESUMED:
transition = virtevent.EVENT_LIFECYCLE_RESUMED
if transition is not None:
self._queue_event(virtevent.LifecycleEvent(uuid, transition))
def _queue_event(self, event):
"""Puts an event on the queue for dispatch.
This method is called by the native event thread to
put events on the queue for later dispatch by the
green thread. Any use of logging APIs is forbidden.
"""
if self._event_queue is None:
return
# Queue the event...
self._event_queue.put(event)
# ...then wakeup the green thread to dispatch it
c = ' '.encode()
self._event_notify_send.write(c)
self._event_notify_send.flush()
def _dispatch_events(self):
"""Wait for & dispatch events from native thread
Blocks until native thread indicates some events
are ready. Then dispatches all queued events.
"""
# Wait to be notified that there are some
# events pending
try:
_c = self._event_notify_recv.read(1)
assert _c
except ValueError:
return # will be raised when pipe is closed
# Process as many events as possible without
# blocking
last_close_event = None
while not self._event_queue.empty():
try:
event = self._event_queue.get(block=False)
if isinstance(event, virtevent.LifecycleEvent):
self.emit_event(event)
elif 'conn' in event and 'reason' in event:
last_close_event = event
except native_Queue.Empty:
pass
if last_close_event is None:
return
conn = last_close_event['conn']
# get_new_connection may already have disabled the host,
# in which case _wrapped_conn is None.
with self._wrapped_conn_lock:
if conn == self._wrapped_conn:
reason = last_close_event['reason']
_error = _("Connection to libvirt lost: %s") % reason
LOG.warn(_error)
self._wrapped_conn = None
# Disable compute service to avoid
                # new instances being scheduled on this host.
self._set_host_enabled(False, disable_reason=_error)
def _init_events_pipe(self):
"""Create a self-pipe for the native thread to synchronize on.
This code is taken from the eventlet tpool module, under terms
of the Apache License v2.0.
"""
self._event_queue = native_Queue.Queue()
try:
rpipe, wpipe = os.pipe()
self._event_notify_send = greenio.GreenPipe(wpipe, 'wb', 0)
self._event_notify_recv = greenio.GreenPipe(rpipe, 'rb', 0)
except (ImportError, NotImplementedError):
# This is Windows compatibility -- use a socket instead
# of a pipe because pipes don't really exist on Windows.
sock = eventlet_util.__original_socket__(socket.AF_INET,
socket.SOCK_STREAM)
sock.bind(('localhost', 0))
sock.listen(50)
csock = eventlet_util.__original_socket__(socket.AF_INET,
socket.SOCK_STREAM)
csock.connect(('localhost', sock.getsockname()[1]))
nsock, addr = sock.accept()
self._event_notify_send = nsock.makefile('wb', 0)
gsock = greenio.GreenSocket(csock)
self._event_notify_recv = gsock.makefile('rb', 0)
def _init_events(self):
"""Initializes the libvirt events subsystem.
This requires running a native thread to provide the
libvirt event loop integration. This forwards events
to a green thread which does the actual dispatching.
"""
self._init_events_pipe()
LOG.debug(_("Starting native event thread"))
event_thread = native_threading.Thread(target=self._native_thread)
event_thread.setDaemon(True)
event_thread.start()
LOG.debug(_("Starting green dispatch thread"))
eventlet.spawn(self._dispatch_thread)
def _do_quality_warnings(self):
"""Warn about untested driver configurations.
This will log a warning message about untested driver or host arch
configurations to indicate to administrators that the quality is
unknown. Currently, only qemu or kvm on intel 32- or 64-bit systems
is tested upstream.
"""
caps = self.get_host_capabilities()
arch = caps.host.cpu.arch
if (CONF.libvirt.virt_type not in ('qemu', 'kvm') or
arch not in ('i686', 'x86_64')):
LOG.warning(_('The libvirt driver is not tested on '
'%(type)s/%(arch)s by the OpenStack project and '
'thus its quality can not be ensured. For more '
'information, see: https://wiki.openstack.org/wiki/'
'HypervisorSupportMatrix'),
{'type': CONF.libvirt.virt_type, 'arch': arch})
def init_host(self, host):
self._do_quality_warnings()
libvirt.registerErrorHandler(libvirt_error_handler, None)
libvirt.virEventRegisterDefaultImpl()
if not self.has_min_version(MIN_LIBVIRT_VERSION):
major = MIN_LIBVIRT_VERSION[0]
minor = MIN_LIBVIRT_VERSION[1]
micro = MIN_LIBVIRT_VERSION[2]
LOG.error(_('Nova requires libvirt version '
'%(major)i.%(minor)i.%(micro)i or greater.'),
{'major': major, 'minor': minor, 'micro': micro})
self._init_events()
def _get_new_connection(self):
# call with _wrapped_conn_lock held
LOG.debug(_('Connecting to libvirt: %s'), self.uri())
wrapped_conn = None
try:
wrapped_conn = self._connect(self.uri(), self.read_only)
finally:
            # Enable or disable the compute service depending on whether
            # the connection succeeded; it may have been disabled earlier.
disable_reason = DISABLE_REASON_UNDEFINED
if not wrapped_conn:
disable_reason = 'Failed to connect to libvirt'
self._set_host_enabled(bool(wrapped_conn), disable_reason)
self._wrapped_conn = wrapped_conn
try:
LOG.debug(_("Registering for lifecycle events %s"), self)
wrapped_conn.domainEventRegisterAny(
None,
libvirt.VIR_DOMAIN_EVENT_ID_LIFECYCLE,
self._event_lifecycle_callback,
self)
except Exception as e:
LOG.warn(_("URI %(uri)s does not support events: %(error)s"),
{'uri': self.uri(), 'error': e})
try:
LOG.debug(_("Registering for connection events: %s") %
str(self))
wrapped_conn.registerCloseCallback(self._close_callback, None)
except (TypeError, AttributeError) as e:
# NOTE: The registerCloseCallback of python-libvirt 1.0.1+
# is defined with 3 arguments, and the above registerClose-
# Callback succeeds. However, the one of python-libvirt 1.0.0
# is defined with 4 arguments and TypeError happens here.
# Then python-libvirt 0.9 does not define a method register-
# CloseCallback.
LOG.debug(_("The version of python-libvirt does not support "
"registerCloseCallback or is too old: %s"), e)
except libvirt.libvirtError as e:
LOG.warn(_("URI %(uri)s does not support connection"
" events: %(error)s"),
{'uri': self.uri(), 'error': e})
return wrapped_conn
def _get_connection(self):
# multiple concurrent connections are protected by _wrapped_conn_lock
with self._wrapped_conn_lock:
wrapped_conn = self._wrapped_conn
if not wrapped_conn or not self._test_connection(wrapped_conn):
wrapped_conn = self._get_new_connection()
return wrapped_conn
_conn = property(_get_connection)
def _close_callback(self, conn, reason, opaque):
close_info = {'conn': conn, 'reason': reason}
self._queue_event(close_info)
@staticmethod
def _test_connection(conn):
try:
conn.getLibVersion()
return True
except libvirt.libvirtError as e:
if (e.get_error_code() in (libvirt.VIR_ERR_SYSTEM_ERROR,
libvirt.VIR_ERR_INTERNAL_ERROR) and
e.get_error_domain() in (libvirt.VIR_FROM_REMOTE,
libvirt.VIR_FROM_RPC)):
LOG.debug(_('Connection to libvirt broke'))
return False
raise
@staticmethod
def uri():
if CONF.libvirt.virt_type == 'uml':
uri = CONF.libvirt.connection_uri or 'uml:///system'
elif CONF.libvirt.virt_type == 'xen':
uri = CONF.libvirt.connection_uri or 'xen:///'
elif CONF.libvirt.virt_type == 'lxc':
uri = CONF.libvirt.connection_uri or 'lxc:///'
else:
uri = CONF.libvirt.connection_uri or 'qemu:///system'
return uri
@staticmethod
def _connect(uri, read_only):
def _connect_auth_cb(creds, opaque):
if len(creds) == 0:
return 0
LOG.warning(
_("Can not handle authentication request for %d credentials")
% len(creds))
raise exception.NovaException(
_("Can not handle authentication request for %d credentials")
% len(creds))
auth = [[libvirt.VIR_CRED_AUTHNAME,
libvirt.VIR_CRED_ECHOPROMPT,
libvirt.VIR_CRED_REALM,
libvirt.VIR_CRED_PASSPHRASE,
libvirt.VIR_CRED_NOECHOPROMPT,
libvirt.VIR_CRED_EXTERNAL],
_connect_auth_cb,
None]
try:
flags = 0
if read_only:
flags = libvirt.VIR_CONNECT_RO
# tpool.proxy_call creates a native thread. Due to limitations
# with eventlet locking we cannot use the logging API inside
# the called function.
return tpool.proxy_call(
(libvirt.virDomain, libvirt.virConnect),
libvirt.openAuth, uri, auth, flags)
except libvirt.libvirtError as ex:
LOG.exception(_("Connection to libvirt failed: %s"), ex)
payload = dict(ip=LibvirtDriver.get_host_ip_addr(),
method='_connect',
reason=ex)
rpc.get_notifier('compute').error(nova_context.get_admin_context(),
'compute.libvirt.error',
payload)
raise exception.HypervisorUnavailable(host=CONF.host)
def get_num_instances(self):
"""Efficient override of base instance_exists method."""
return self._conn.numOfDomains()
def instance_exists(self, instance_name):
"""Efficient override of base instance_exists method."""
try:
self._lookup_by_name(instance_name)
return True
except exception.NovaException:
return False
# TODO(Shrews): Remove when libvirt Bugzilla bug # 836647 is fixed.
def list_instance_ids(self):
if self._conn.numOfDomains() == 0:
return []
return self._conn.listDomainsID()
def list_instances(self):
names = []
for domain_id in self.list_instance_ids():
try:
# We skip domains with ID 0 (hypervisors).
if domain_id != 0:
domain = self._lookup_by_id(domain_id)
names.append(domain.name())
except exception.InstanceNotFound:
# Ignore deleted instance while listing
continue
# extend instance list to contain also defined domains
names.extend([vm for vm in self._conn.listDefinedDomains()
if vm not in names])
return names
def list_instance_uuids(self):
uuids = set()
for domain_id in self.list_instance_ids():
try:
# We skip domains with ID 0 (hypervisors).
if domain_id != 0:
domain = self._lookup_by_id(domain_id)
uuids.add(domain.UUIDString())
except exception.InstanceNotFound:
# Ignore deleted instance while listing
continue
# extend instance list to contain also defined domains
for domain_name in self._conn.listDefinedDomains():
try:
uuids.add(self._lookup_by_name(domain_name).UUIDString())
except exception.InstanceNotFound:
# Ignore deleted instance while listing
continue
return list(uuids)
def plug_vifs(self, instance, network_info):
"""Plug VIFs into networks."""
for vif in network_info:
self.vif_driver.plug(instance, vif)
def unplug_vifs(self, instance, network_info, ignore_errors=False):
"""Unplug VIFs from networks."""
for vif in network_info:
try:
self.vif_driver.unplug(instance, vif)
except exception.NovaException:
if not ignore_errors:
raise
def _teardown_container(self, instance):
inst_path = libvirt_utils.get_instance_path(instance)
container_dir = os.path.join(inst_path, 'rootfs')
container_root_device = instance.get('root_device_name')
disk.teardown_container(container_dir, container_root_device)
def _destroy(self, instance):
try:
virt_dom = self._lookup_by_name(instance['name'])
except exception.InstanceNotFound:
virt_dom = None
# If the instance is already terminated, we're still happy
# Otherwise, destroy it
old_domid = -1
if virt_dom is not None:
try:
old_domid = virt_dom.ID()
virt_dom.destroy()
# NOTE(GuanQiang): teardown container to avoid resource leak
if CONF.libvirt.virt_type == 'lxc':
self._teardown_container(instance)
except libvirt.libvirtError as e:
is_okay = False
errcode = e.get_error_code()
if errcode == libvirt.VIR_ERR_OPERATION_INVALID:
# If the instance is already shut off, we get this:
# Code=55 Error=Requested operation is not valid:
# domain is not running
(state, _max_mem, _mem, _cpus, _t) = virt_dom.info()
state = LIBVIRT_POWER_STATE[state]
if state == power_state.SHUTDOWN:
is_okay = True
elif errcode == libvirt.VIR_ERR_OPERATION_TIMEOUT:
LOG.warn(_("Cannot destroy instance, operation time out"),
instance=instance)
reason = _("operation time out")
raise exception.InstancePowerOffFailure(reason=reason)
if not is_okay:
with excutils.save_and_reraise_exception():
LOG.error(_('Error from libvirt during destroy. '
'Code=%(errcode)s Error=%(e)s'),
{'errcode': errcode, 'e': e},
instance=instance)
def _wait_for_destroy(expected_domid):
"""Called at an interval until the VM is gone."""
# NOTE(vish): If the instance disappears during the destroy
# we ignore it so the cleanup can still be
# attempted because we would prefer destroy to
# never fail.
try:
dom_info = self.get_info(instance)
state = dom_info['state']
new_domid = dom_info['id']
except exception.InstanceNotFound:
LOG.error(_("During wait destroy, instance disappeared."),
instance=instance)
raise loopingcall.LoopingCallDone()
if state == power_state.SHUTDOWN:
LOG.info(_("Instance destroyed successfully."),
instance=instance)
raise loopingcall.LoopingCallDone()
# NOTE(wangpan): If the instance was booted again after destroy,
            #                this may be an endless loop, so check the id of
# domain here, if it changed and the instance is
# still running, we should destroy it again.
# see https://bugs.launchpad.net/nova/+bug/1111213 for more details
if new_domid != expected_domid:
LOG.info(_("Instance may be started again."),
instance=instance)
kwargs['is_running'] = True
raise loopingcall.LoopingCallDone()
kwargs = {'is_running': False}
timer = loopingcall.FixedIntervalLoopingCall(_wait_for_destroy,
old_domid)
timer.start(interval=0.5).wait()
if kwargs['is_running']:
LOG.info(_("Going to destroy instance again."), instance=instance)
self._destroy(instance)
def destroy(self, context, instance, network_info, block_device_info=None,
destroy_disks=True):
self._destroy(instance)
self.cleanup(context, instance, network_info, block_device_info,
destroy_disks)
def _undefine_domain(self, instance):
try:
virt_dom = self._lookup_by_name(instance['name'])
except exception.InstanceNotFound:
virt_dom = None
if virt_dom:
try:
try:
virt_dom.undefineFlags(
libvirt.VIR_DOMAIN_UNDEFINE_MANAGED_SAVE)
except libvirt.libvirtError:
LOG.debug(_("Error from libvirt during undefineFlags."
" Retrying with undefine"), instance=instance)
virt_dom.undefine()
except AttributeError:
# NOTE(vish): Older versions of libvirt don't support
# undefine flags, so attempt to do the
# right thing.
try:
if virt_dom.hasManagedSaveImage(0):
virt_dom.managedSaveRemove(0)
except AttributeError:
pass
virt_dom.undefine()
except libvirt.libvirtError as e:
with excutils.save_and_reraise_exception():
errcode = e.get_error_code()
LOG.error(_('Error from libvirt during undefine. '
'Code=%(errcode)s Error=%(e)s') %
{'errcode': errcode, 'e': e}, instance=instance)
def cleanup(self, context, instance, network_info, block_device_info=None,
destroy_disks=True):
self._undefine_domain(instance)
self.unplug_vifs(instance, network_info, ignore_errors=True)
retry = True
while retry:
try:
self.firewall_driver.unfilter_instance(instance,
network_info=network_info)
except libvirt.libvirtError as e:
try:
state = self.get_info(instance)['state']
except exception.InstanceNotFound:
state = power_state.SHUTDOWN
if state != power_state.SHUTDOWN:
LOG.warn(_("Instance may be still running, destroy "
"it again."), instance=instance)
self._destroy(instance)
else:
retry = False
errcode = e.get_error_code()
LOG.exception(_('Error from libvirt during unfilter. '
'Code=%(errcode)s Error=%(e)s') %
{'errcode': errcode, 'e': e},
instance=instance)
reason = "Error unfiltering instance."
raise exception.InstanceTerminationFailure(reason=reason)
except Exception:
retry = False
raise
else:
retry = False
# FIXME(wangpan): if the instance is booted again here, such as the
        #                 soft reboot operation booting it here, it will
# become "running deleted", should we check and destroy
# it at the end of this method?
# NOTE(vish): we disconnect from volumes regardless
block_device_mapping = driver.block_device_info_get_mapping(
block_device_info)
for vol in block_device_mapping:
connection_info = vol['connection_info']
disk_dev = vol['mount_device'].rpartition("/")[2]
if ('data' in connection_info and
'volume_id' in connection_info['data']):
volume_id = connection_info['data']['volume_id']
encryption = encryptors.get_encryption_metadata(
context, self._volume_api, volume_id, connection_info)
if encryption:
# The volume must be detached from the VM before
# disconnecting it from its encryptor. Otherwise, the
# encryptor may report that the volume is still in use.
encryptor = self._get_volume_encryptor(connection_info,
encryption)
encryptor.detach_volume(**encryption)
try:
self.volume_driver_method('disconnect_volume',
connection_info,
disk_dev)
except Exception as exc:
with excutils.save_and_reraise_exception() as ctxt:
if destroy_disks:
# Don't block on Volume errors if we're trying to
                        # delete the instance as we may be partially created
# or deleted
ctxt.reraise = False
LOG.warn(_("Ignoring Volume Error on vol %(vol_id)s "
"during delete %(exc)s"),
{'vol_id': vol.get('volume_id'), 'exc': exc},
instance=instance)
if destroy_disks:
self._delete_instance_files(instance)
self._cleanup_lvm(instance)
#NOTE(haomai): destroy volumes if needed
if CONF.libvirt.images_type == 'rbd':
self._cleanup_rbd(instance)
def _cleanup_rbd(self, instance):
pool = CONF.libvirt.images_rbd_pool
volumes = libvirt_utils.list_rbd_volumes(pool)
pattern = instance['uuid']
def belongs_to_instance(disk):
return disk.startswith(pattern)
volumes = filter(belongs_to_instance, volumes)
if volumes:
libvirt_utils.remove_rbd_volumes(pool, *volumes)
def _cleanup_lvm(self, instance):
"""Delete all LVM disks for given instance object."""
disks = self._lvm_disks(instance)
if disks:
libvirt_utils.remove_logical_volumes(*disks)
def _lvm_disks(self, instance):
"""Returns all LVM disks for given instance object."""
if CONF.libvirt.images_volume_group:
vg = os.path.join('/dev', CONF.libvirt.images_volume_group)
if not os.path.exists(vg):
return []
pattern = '%s_' % instance['uuid']
# TODO(sdague): remove in Juno
def belongs_to_instance_legacy(disk):
# We don't want to leak old disks, but at the same time, we
# don't want to do an unsafe thing. So we will only handle
# the old filter if it's the system default still.
pattern = '%s_' % instance['name']
if disk.startswith(pattern):
if CONF.instance_name_template == 'instance-%08x':
return True
else:
LOG.warning(_('Volume %(disk)s possibly unsafe to '
'remove, please clean up manually'),
{'disk': disk})
return False
def belongs_to_instance(disk):
return disk.startswith(pattern)
def fullpath(name):
return os.path.join(vg, name)
logical_volumes = libvirt_utils.list_logical_volumes(vg)
disk_names = filter(belongs_to_instance, logical_volumes)
# TODO(sdague): remove in Juno
disk_names.extend(
filter(belongs_to_instance_legacy, logical_volumes)
)
disks = map(fullpath, disk_names)
return disks
return []
def get_volume_connector(self, instance):
if not self._initiator:
self._initiator = libvirt_utils.get_iscsi_initiator()
if not self._initiator:
LOG.debug(_('Could not determine iscsi initiator name'),
instance=instance)
if not self._fc_wwnns:
self._fc_wwnns = libvirt_utils.get_fc_wwnns()
if not self._fc_wwnns or len(self._fc_wwnns) == 0:
LOG.debug(_('Could not determine fibre channel '
'world wide node names'),
instance=instance)
if not self._fc_wwpns:
self._fc_wwpns = libvirt_utils.get_fc_wwpns()
if not self._fc_wwpns or len(self._fc_wwpns) == 0:
LOG.debug(_('Could not determine fibre channel '
'world wide port names'),
instance=instance)
connector = {'ip': CONF.my_ip,
'host': CONF.host}
if self._initiator:
connector['initiator'] = self._initiator
if self._fc_wwnns and self._fc_wwpns:
connector["wwnns"] = self._fc_wwnns
connector["wwpns"] = self._fc_wwpns
return connector
def _cleanup_resize(self, instance, network_info):
target = libvirt_utils.get_instance_path(instance) + "_resize"
if os.path.exists(target):
# Deletion can fail over NFS, so retry the deletion as required.
# Set maximum attempt as 5, most test can remove the directory
# for the second time.
utils.execute('rm', '-rf', target, delay_on_retry=True,
attempts=5)
if instance['host'] != CONF.host:
self._undefine_domain(instance)
self.unplug_vifs(instance, network_info)
self.firewall_driver.unfilter_instance(instance, network_info)
def volume_driver_method(self, method_name, connection_info,
*args, **kwargs):
driver_type = connection_info.get('driver_volume_type')
if driver_type not in self.volume_drivers:
raise exception.VolumeDriverNotFound(driver_type=driver_type)
driver = self.volume_drivers[driver_type]
method = getattr(driver, method_name)
return method(connection_info, *args, **kwargs)
def _get_volume_encryptor(self, connection_info, encryption):
encryptor = encryptors.get_volume_encryptor(connection_info,
**encryption)
return encryptor
def attach_volume(self, context, connection_info, instance, mountpoint,
disk_bus=None, device_type=None, encryption=None):
instance_name = instance['name']
virt_dom = self._lookup_by_name(instance_name)
disk_dev = mountpoint.rpartition("/")[2]
bdm = {
'device_name': disk_dev,
'disk_bus': disk_bus,
'device_type': device_type}
# Note(cfb): If the volume has a custom block size, check that
        #            we are using QEMU/KVM and libvirt >= 0.10.2. The
# presence of a block size is considered mandatory by
# cinder so we fail if we can't honor the request.
data = {}
if ('data' in connection_info):
data = connection_info['data']
if ('logical_block_size' in data or 'physical_block_size' in data):
if ((CONF.libvirt.virt_type != "kvm" and
CONF.libvirt.virt_type != "qemu")):
msg = _("Volume sets block size, but the current "
"libvirt hypervisor '%s' does not support custom "
"block size") % CONF.libvirt.virt_type
raise exception.InvalidHypervisorType(msg)
if not self.has_min_version(MIN_LIBVIRT_BLOCKIO_VERSION):
ver = ".".join([str(x) for x in MIN_LIBVIRT_BLOCKIO_VERSION])
msg = _("Volume sets block size, but libvirt '%s' or later is "
"required.") % ver
raise exception.Invalid(msg)
disk_info = blockinfo.get_info_from_bdm(CONF.libvirt.virt_type, bdm)
conf = self.volume_driver_method('connect_volume',
connection_info,
disk_info)
self.set_cache_mode(conf)
try:
# NOTE(vish): We can always affect config because our
# domains are persistent, but we should only
# affect live if the domain is running.
flags = libvirt.VIR_DOMAIN_AFFECT_CONFIG
state = LIBVIRT_POWER_STATE[virt_dom.info()[0]]
if state == power_state.RUNNING:
flags |= libvirt.VIR_DOMAIN_AFFECT_LIVE
# cache device_path in connection_info -- required by encryptors
if 'data' in connection_info:
connection_info['data']['device_path'] = conf.source_path
if encryption:
encryptor = self._get_volume_encryptor(connection_info,
encryption)
encryptor.attach_volume(context, **encryption)
virt_dom.attachDeviceFlags(conf.to_xml(), flags)
except Exception as ex:
if isinstance(ex, libvirt.libvirtError):
errcode = ex.get_error_code()
if errcode == libvirt.VIR_ERR_OPERATION_FAILED:
self.volume_driver_method('disconnect_volume',
connection_info,
disk_dev)
raise exception.DeviceIsBusy(device=disk_dev)
with excutils.save_and_reraise_exception():
self.volume_driver_method('disconnect_volume',
connection_info,
disk_dev)
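    # The AFFECT_CONFIG / AFFECT_LIVE flag handling above recurs throughout
    # this driver; condensed sketch (hypothetical standalone form):
    #
    #   flags = libvirt.VIR_DOMAIN_AFFECT_CONFIG
    #   if LIBVIRT_POWER_STATE[dom.info()[0]] == power_state.RUNNING:
    #       flags |= libvirt.VIR_DOMAIN_AFFECT_LIVE
    #   dom.attachDeviceFlags(device_xml, flags)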
def _swap_volume(self, domain, disk_path, new_path):
"""Swap existing disk with a new block device."""
# Save a copy of the domain's running XML file
xml = domain.XMLDesc(0)
# Abort is an idempotent operation, so make sure any block
# jobs which may have failed are ended.
try:
domain.blockJobAbort(disk_path, 0)
except Exception:
pass
try:
# NOTE (rmk): blockRebase cannot be executed on persistent
# domains, so we need to temporarily undefine it.
# If any part of this block fails, the domain is
# re-defined regardless.
if domain.isPersistent():
domain.undefine()
# Start copy with VIR_DOMAIN_REBASE_REUSE_EXT flag to
# allow writing to existing external volume file
domain.blockRebase(disk_path, new_path, 0,
libvirt.VIR_DOMAIN_BLOCK_REBASE_COPY |
libvirt.VIR_DOMAIN_BLOCK_REBASE_REUSE_EXT)
while self._wait_for_block_job(domain, disk_path):
time.sleep(0.5)
domain.blockJobAbort(disk_path,
libvirt.VIR_DOMAIN_BLOCK_JOB_ABORT_PIVOT)
finally:
self._conn.defineXML(xml)
def swap_volume(self, old_connection_info,
new_connection_info, instance, mountpoint):
instance_name = instance['name']
virt_dom = self._lookup_by_name(instance_name)
disk_dev = mountpoint.rpartition("/")[2]
xml = self._get_disk_xml(virt_dom.XMLDesc(0), disk_dev)
if not xml:
raise exception.DiskNotFound(location=disk_dev)
disk_info = {
'dev': disk_dev,
'bus': blockinfo.get_disk_bus_for_disk_dev(
CONF.libvirt.virt_type, disk_dev),
'type': 'disk',
}
conf = self.volume_driver_method('connect_volume',
new_connection_info,
disk_info)
if not conf.source_path:
self.volume_driver_method('disconnect_volume',
new_connection_info,
disk_dev)
raise NotImplementedError(_("Swap only supports host devices"))
self._swap_volume(virt_dom, disk_dev, conf.source_path)
self.volume_driver_method('disconnect_volume',
old_connection_info,
disk_dev)
@staticmethod
def _get_disk_xml(xml, device):
"""Returns the xml for the disk mounted at device."""
try:
doc = etree.fromstring(xml)
except Exception:
return None
ret = doc.findall('./devices/disk')
for node in ret:
for child in node.getchildren():
if child.tag == 'target':
if child.get('dev') == device:
return etree.tostring(node)
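    # For reference, _get_disk_xml(xml, 'vdb') returns the serialized
    # <disk> element whose target matches, e.g. (trimmed, illustrative
    # libvirt XML):
    #
    #   <disk type='block' device='disk'>
    #     <source dev='/dev/mapper/volume-1234'/>
    #     <target dev='vdb' bus='virtio'/>
    #   </disk>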
def _get_existing_domain_xml(self, instance, network_info,
block_device_info=None):
try:
virt_dom = self._lookup_by_name(instance['name'])
xml = virt_dom.XMLDesc(0)
except exception.InstanceNotFound:
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance,
block_device_info)
xml = self.to_xml(nova_context.get_admin_context(),
instance, network_info, disk_info,
block_device_info=block_device_info)
return xml
def detach_volume(self, connection_info, instance, mountpoint,
encryption=None):
instance_name = instance['name']
disk_dev = mountpoint.rpartition("/")[2]
try:
virt_dom = self._lookup_by_name(instance_name)
xml = self._get_disk_xml(virt_dom.XMLDesc(0), disk_dev)
if not xml:
raise exception.DiskNotFound(location=disk_dev)
else:
# NOTE(vish): We can always affect config because our
# domains are persistent, but we should only
# affect live if the domain is running.
flags = libvirt.VIR_DOMAIN_AFFECT_CONFIG
state = LIBVIRT_POWER_STATE[virt_dom.info()[0]]
if state == power_state.RUNNING:
flags |= libvirt.VIR_DOMAIN_AFFECT_LIVE
virt_dom.detachDeviceFlags(xml, flags)
if encryption:
# The volume must be detached from the VM before
# disconnecting it from its encryptor. Otherwise, the
# encryptor may report that the volume is still in use.
encryptor = self._get_volume_encryptor(connection_info,
encryption)
encryptor.detach_volume(**encryption)
except libvirt.libvirtError as ex:
# NOTE(vish): This is called to cleanup volumes after live
# migration, so we should still disconnect even if
# the instance doesn't exist here anymore.
error_code = ex.get_error_code()
if error_code == libvirt.VIR_ERR_NO_DOMAIN:
                LOG.warn(_("During detach_volume, instance disappeared."))
else:
raise
self.volume_driver_method('disconnect_volume',
connection_info,
disk_dev)
def attach_interface(self, instance, image_meta, vif):
virt_dom = self._lookup_by_name(instance['name'])
flavor = flavor_obj.Flavor.get_by_id(
nova_context.get_admin_context(read_deleted='yes'),
instance['instance_type_id'])
self.vif_driver.plug(instance, vif)
self.firewall_driver.setup_basic_filtering(instance, [vif])
cfg = self.vif_driver.get_config(instance, vif, image_meta,
flavor)
try:
flags = libvirt.VIR_DOMAIN_AFFECT_CONFIG
state = LIBVIRT_POWER_STATE[virt_dom.info()[0]]
if state == power_state.RUNNING:
flags |= libvirt.VIR_DOMAIN_AFFECT_LIVE
virt_dom.attachDeviceFlags(cfg.to_xml(), flags)
except libvirt.libvirtError:
LOG.error(_('attaching network adapter failed.'),
instance=instance)
self.vif_driver.unplug(instance, vif)
raise exception.InterfaceAttachFailed(instance)
def detach_interface(self, instance, vif):
virt_dom = self._lookup_by_name(instance['name'])
flavor = flavor_obj.Flavor.get_by_id(
nova_context.get_admin_context(read_deleted='yes'),
instance['instance_type_id'])
cfg = self.vif_driver.get_config(instance, vif, None, flavor)
try:
self.vif_driver.unplug(instance, vif)
flags = libvirt.VIR_DOMAIN_AFFECT_CONFIG
state = LIBVIRT_POWER_STATE[virt_dom.info()[0]]
if state == power_state.RUNNING:
flags |= libvirt.VIR_DOMAIN_AFFECT_LIVE
virt_dom.detachDeviceFlags(cfg.to_xml(), flags)
except libvirt.libvirtError as ex:
error_code = ex.get_error_code()
if error_code == libvirt.VIR_ERR_NO_DOMAIN:
LOG.warn(_("During detach_interface, "
"instance disappeared."),
instance=instance)
else:
LOG.error(_('detaching network adapter failed.'),
instance=instance)
raise exception.InterfaceDetachFailed(instance)
def _create_snapshot_metadata(self, base, instance, img_fmt, snp_name):
metadata = {'is_public': False,
'status': 'active',
'name': snp_name,
'properties': {
'kernel_id': instance['kernel_id'],
'image_location': 'snapshot',
'image_state': 'available',
'owner_id': instance['project_id'],
'ramdisk_id': instance['ramdisk_id'],
}
}
if instance['os_type']:
metadata['properties']['os_type'] = instance['os_type']
# NOTE(vish): glance forces ami disk format to be ami
if base.get('disk_format') == 'ami':
metadata['disk_format'] = 'ami'
else:
metadata['disk_format'] = img_fmt
metadata['container_format'] = base.get('container_format', 'bare')
return metadata
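    # Illustrative result for a qcow2 snapshot of a non-AMI base image
    # (field values are assumptions for the sketch):
    #
    #   {'is_public': False, 'status': 'active', 'name': 'snap-1',
    #    'disk_format': 'qcow2', 'container_format': 'bare',
    #    'properties': {'image_location': 'snapshot',
    #                   'image_state': 'available', ...}}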
def snapshot(self, context, instance, image_href, update_task_state):
"""Create snapshot from a running VM instance.
This command only works with qemu 0.14+
"""
try:
virt_dom = self._lookup_by_name(instance['name'])
except exception.InstanceNotFound:
raise exception.InstanceNotRunning(instance_id=instance['uuid'])
(image_service, image_id) = glance.get_remote_image_service(
context, instance['image_ref'])
base = compute_utils.get_image_metadata(
context, image_service, image_id, instance)
_image_service = glance.get_remote_image_service(context, image_href)
snapshot_image_service, snapshot_image_id = _image_service
snapshot = snapshot_image_service.show(context, snapshot_image_id)
disk_path = libvirt_utils.find_disk(virt_dom)
source_format = libvirt_utils.get_disk_type(disk_path)
image_format = CONF.libvirt.snapshot_image_format or source_format
# NOTE(bfilippov): save lvm and rbd as raw
if image_format == 'lvm' or image_format == 'rbd':
image_format = 'raw'
metadata = self._create_snapshot_metadata(base,
instance,
image_format,
snapshot['name'])
snapshot_name = uuid.uuid4().hex
(state, _max_mem, _mem, _cpus, _t) = virt_dom.info()
state = LIBVIRT_POWER_STATE[state]
# NOTE(rmk): Live snapshots require QEMU 1.3 and Libvirt 1.0.0.
# These restrictions can be relaxed as other configurations
# can be validated.
if self.has_min_version(MIN_LIBVIRT_LIVESNAPSHOT_VERSION,
MIN_QEMU_LIVESNAPSHOT_VERSION,
REQ_HYPERVISOR_LIVESNAPSHOT) \
and not source_format == "lvm" and not source_format == 'rbd':
live_snapshot = True
# Abort is an idempotent operation, so make sure any block
# jobs which may have failed are ended. This operation also
# confirms the running instance, as opposed to the system as a
# whole, has a new enough version of the hypervisor (bug 1193146).
try:
virt_dom.blockJobAbort(disk_path, 0)
except libvirt.libvirtError as ex:
error_code = ex.get_error_code()
if error_code == libvirt.VIR_ERR_CONFIG_UNSUPPORTED:
live_snapshot = False
else:
pass
else:
live_snapshot = False
# NOTE(rmk): We cannot perform live snapshots when a managedSave
# file is present, so we will use the cold/legacy method
# for instances which are shutdown.
if state == power_state.SHUTDOWN:
live_snapshot = False
# NOTE(dkang): managedSave does not work for LXC
if CONF.libvirt.virt_type != 'lxc' and not live_snapshot:
if state == power_state.RUNNING or state == power_state.PAUSED:
self._detach_pci_devices(virt_dom,
pci_manager.get_instance_pci_devs(instance))
virt_dom.managedSave(0)
snapshot_backend = self.image_backend.snapshot(disk_path,
image_type=source_format)
if live_snapshot:
LOG.info(_("Beginning live snapshot process"),
instance=instance)
else:
LOG.info(_("Beginning cold snapshot process"),
instance=instance)
update_task_state(task_state=task_states.IMAGE_PENDING_UPLOAD)
snapshot_directory = CONF.libvirt.snapshots_directory
fileutils.ensure_tree(snapshot_directory)
with utils.tempdir(dir=snapshot_directory) as tmpdir:
try:
out_path = os.path.join(tmpdir, snapshot_name)
if live_snapshot:
# NOTE(xqueralt): libvirt needs o+x in the temp directory
os.chmod(tmpdir, 0o701)
self._live_snapshot(virt_dom, disk_path, out_path,
image_format)
else:
snapshot_backend.snapshot_extract(out_path, image_format)
finally:
new_dom = None
# NOTE(dkang): because previous managedSave is not called
# for LXC, _create_domain must not be called.
if CONF.libvirt.virt_type != 'lxc' and not live_snapshot:
if state == power_state.RUNNING:
new_dom = self._create_domain(domain=virt_dom)
elif state == power_state.PAUSED:
new_dom = self._create_domain(domain=virt_dom,
launch_flags=libvirt.VIR_DOMAIN_START_PAUSED)
if new_dom is not None:
self._attach_pci_devices(new_dom,
pci_manager.get_instance_pci_devs(instance))
LOG.info(_("Snapshot extracted, beginning image upload"),
instance=instance)
# Upload that image to the image service
update_task_state(task_state=task_states.IMAGE_UPLOADING,
expected_state=task_states.IMAGE_PENDING_UPLOAD)
with libvirt_utils.file_open(out_path) as image_file:
image_service.update(context,
image_href,
metadata,
image_file)
LOG.info(_("Snapshot image upload complete"),
instance=instance)
@staticmethod
def _wait_for_block_job(domain, disk_path, abort_on_error=False):
"""Wait for libvirt block job to complete.
Libvirt may return either cur==end or an empty dict when
the job is complete, depending on whether the job has been
        cleaned up by libvirt yet or not.
:returns: True if still in progress
False if completed
"""
status = domain.blockJobInfo(disk_path, 0)
if status == -1 and abort_on_error:
msg = _('libvirt error while requesting blockjob info.')
raise exception.NovaException(msg)
try:
cur = status.get('cur', 0)
end = status.get('end', 0)
except Exception:
return False
if cur == end:
return False
else:
return True
def _live_snapshot(self, domain, disk_path, out_path, image_format):
"""Snapshot an instance without downtime."""
# Save a copy of the domain's running XML file
xml = domain.XMLDesc(0)
# Abort is an idempotent operation, so make sure any block
# jobs which may have failed are ended.
try:
domain.blockJobAbort(disk_path, 0)
except Exception:
pass
# NOTE (rmk): We are using shallow rebases as a workaround to a bug
# in QEMU 1.3. In order to do this, we need to create
# a destination image with the original backing file
# and matching size of the instance root disk.
src_disk_size = libvirt_utils.get_disk_size(disk_path)
src_back_path = libvirt_utils.get_disk_backing_file(disk_path,
basename=False)
disk_delta = out_path + '.delta'
libvirt_utils.create_cow_image(src_back_path, disk_delta,
src_disk_size)
try:
# NOTE (rmk): blockRebase cannot be executed on persistent
# domains, so we need to temporarily undefine it.
# If any part of this block fails, the domain is
# re-defined regardless.
if domain.isPersistent():
domain.undefine()
# NOTE (rmk): Establish a temporary mirror of our root disk and
# issue an abort once we have a complete copy.
domain.blockRebase(disk_path, disk_delta, 0,
libvirt.VIR_DOMAIN_BLOCK_REBASE_COPY |
libvirt.VIR_DOMAIN_BLOCK_REBASE_REUSE_EXT |
libvirt.VIR_DOMAIN_BLOCK_REBASE_SHALLOW)
while self._wait_for_block_job(domain, disk_path):
time.sleep(0.5)
domain.blockJobAbort(disk_path, 0)
libvirt_utils.chown(disk_delta, os.getuid())
finally:
self._conn.defineXML(xml)
# Convert the delta (CoW) image with a backing file to a flat
# image with no backing file.
libvirt_utils.extract_snapshot(disk_delta, 'qcow2',
out_path, image_format)
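    # The live snapshot sequence above, condensed:
    #
    #   1. create a CoW delta sharing the instance's backing file
    #   2. blockRebase(disk, delta, COPY | REUSE_EXT | SHALLOW) - mirror
    #   3. poll _wait_for_block_job() until cur == end
    #   4. blockJobAbort(disk, 0) - stop the mirror
    #   5. extract_snapshot(delta) - flatten into the upload image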
def _volume_snapshot_update_status(self, context, snapshot_id, status):
"""Send a snapshot status update to Cinder.
This method captures and logs exceptions that occur
since callers cannot do anything useful with these exceptions.
Operations on the Cinder side waiting for this will time out if
a failure occurs sending the update.
:param context: security context
:param snapshot_id: id of snapshot being updated
:param status: new status value
"""
try:
self._volume_api.update_snapshot_status(context,
snapshot_id,
status)
except Exception:
msg = _('Failed to send updated snapshot status '
'to volume service.')
LOG.exception(msg)
def _volume_snapshot_create(self, context, instance, domain,
volume_id, snapshot_id, new_file):
"""Perform volume snapshot.
:param domain: VM that volume is attached to
:param volume_id: volume UUID to snapshot
:param snapshot_id: UUID of snapshot being created
:param new_file: relative path to new qcow2 file present on share
"""
xml = domain.XMLDesc(0)
xml_doc = etree.fromstring(xml)
device_info = vconfig.LibvirtConfigGuest()
device_info.parse_dom(xml_doc)
disks_to_snap = [] # to be snapshotted by libvirt
disks_to_skip = [] # local disks not snapshotted
for disk in device_info.devices:
if (disk.root_name != 'disk'):
continue
if (disk.target_dev is None):
continue
if (disk.serial is None or disk.serial != volume_id):
disks_to_skip.append(disk.source_path)
continue
# disk is a Cinder volume with the correct volume_id
disk_info = {
'dev': disk.target_dev,
'serial': disk.serial,
'current_file': disk.source_path
}
# Determine path for new_file based on current path
current_file = disk_info['current_file']
new_file_path = os.path.join(os.path.dirname(current_file),
new_file)
disks_to_snap.append((current_file, new_file_path))
if not disks_to_snap:
msg = _('Found no disk to snapshot.')
raise exception.NovaException(msg)
snapshot = vconfig.LibvirtConfigGuestSnapshot()
for current_name, new_filename in disks_to_snap:
snap_disk = vconfig.LibvirtConfigGuestSnapshotDisk()
snap_disk.name = current_name
snap_disk.source_path = new_filename
snap_disk.source_type = 'file'
snap_disk.snapshot = 'external'
snap_disk.driver_name = 'qcow2'
snapshot.add_disk(snap_disk)
for dev in disks_to_skip:
snap_disk = vconfig.LibvirtConfigGuestSnapshotDisk()
snap_disk.name = dev
snap_disk.snapshot = 'no'
snapshot.add_disk(snap_disk)
snapshot_xml = snapshot.to_xml()
LOG.debug(_("snap xml: %s") % snapshot_xml)
snap_flags = (libvirt.VIR_DOMAIN_SNAPSHOT_CREATE_DISK_ONLY |
libvirt.VIR_DOMAIN_SNAPSHOT_CREATE_NO_METADATA |
libvirt.VIR_DOMAIN_SNAPSHOT_CREATE_REUSE_EXT)
QUIESCE = libvirt.VIR_DOMAIN_SNAPSHOT_CREATE_QUIESCE
try:
domain.snapshotCreateXML(snapshot_xml,
snap_flags | QUIESCE)
return
except libvirt.libvirtError:
msg = _('Unable to create quiesced VM snapshot, '
'attempting again with quiescing disabled.')
LOG.exception(msg)
try:
domain.snapshotCreateXML(snapshot_xml, snap_flags)
except libvirt.libvirtError:
msg = _('Unable to create VM snapshot, '
'failing volume_snapshot operation.')
LOG.exception(msg)
raise
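    # The snapshot_xml generated above looks roughly like this trimmed,
    # illustrative libvirt <domainsnapshot> document:
    #
    #   <domainsnapshot>
    #     <disks>
    #       <disk name='/path/to/disk' snapshot='external'>
    #         <source file='/path/to/new_file'/>
    #       </disk>
    #       <disk name='/path/to/disk.local' snapshot='no'/>
    #     </disks>
    #   </domainsnapshot>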
def volume_snapshot_create(self, context, instance, volume_id,
create_info):
"""Create snapshots of a Cinder volume via libvirt.
:param instance: VM instance object reference
:param volume_id: id of volume being snapshotted
:param create_info: dict of information used to create snapshots
- snapshot_id : ID of snapshot
- type : qcow2 / <other>
- new_file : qcow2 file created by Cinder which
becomes the VM's active image after
the snapshot is complete
"""
LOG.debug(_("volume_snapshot_create: create_info: %(c_info)s"),
{'c_info': create_info}, instance=instance)
try:
virt_dom = self._lookup_by_name(instance.name)
except exception.InstanceNotFound:
raise exception.InstanceNotRunning(instance_id=instance.uuid)
if create_info['type'] != 'qcow2':
raise exception.NovaException(_('Unknown type: %s') %
create_info['type'])
snapshot_id = create_info.get('snapshot_id', None)
if snapshot_id is None:
raise exception.NovaException(_('snapshot_id required '
'in create_info'))
try:
self._volume_snapshot_create(context, instance, virt_dom,
volume_id, snapshot_id,
create_info['new_file'])
except Exception:
with excutils.save_and_reraise_exception():
msg = _('Error occurred during volume_snapshot_create, '
'sending error status to Cinder.')
LOG.exception(msg)
self._volume_snapshot_update_status(
context, snapshot_id, 'error')
self._volume_snapshot_update_status(
context, snapshot_id, 'creating')
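    # Example create_info payload accepted above (values are illustrative):
    #
    #   {'type': 'qcow2',
    #    'snapshot_id': '8a7b1c9e-0f00-4c1d-9a52-111111111111',
    #    'new_file': 'volume-1234.5678abcd'}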
def _volume_snapshot_delete(self, context, instance, volume_id,
snapshot_id, delete_info=None):
"""Note:
if file being merged into == active image:
do a blockRebase (pull) operation
else:
do a blockCommit operation
Files must be adjacent in snap chain.
:param instance: instance object reference
:param volume_id: volume UUID
:param snapshot_id: snapshot UUID (unused currently)
:param delete_info: {
'type': 'qcow2',
'file_to_merge': 'a.img',
'merge_target_file': 'b.img' or None (if merging file_to_merge into
active image)
}
Libvirt blockjob handling required for this method is broken
in versions of libvirt that do not contain:
http://libvirt.org/git/?p=libvirt.git;h=0f9e67bfad (1.1.1)
(Patch is pending in 1.0.5-maint branch as well, but we cannot detect
libvirt 1.0.5.5 vs. 1.0.5.6 here.)
"""
if not self.has_min_version(MIN_LIBVIRT_BLOCKJOBINFO_VERSION):
ver = '.'.join([str(x) for x in MIN_LIBVIRT_BLOCKJOBINFO_VERSION])
msg = _("Libvirt '%s' or later is required for online deletion "
"of volume snapshots.") % ver
raise exception.Invalid(msg)
LOG.debug(_('volume_snapshot_delete: delete_info: %s') % delete_info)
if delete_info['type'] != 'qcow2':
msg = _('Unknown delete_info type %s') % delete_info['type']
raise exception.NovaException(msg)
try:
virt_dom = self._lookup_by_name(instance.name)
except exception.InstanceNotFound:
raise exception.InstanceNotRunning(instance_id=instance.uuid)
        # Find dev name
my_dev = None
active_disk = None
xml = virt_dom.XMLDesc(0)
xml_doc = etree.fromstring(xml)
device_info = vconfig.LibvirtConfigGuest()
device_info.parse_dom(xml_doc)
for disk in device_info.devices:
if (disk.root_name != 'disk'):
continue
if (disk.target_dev is None or disk.serial is None):
continue
if disk.serial == volume_id:
my_dev = disk.target_dev
active_disk = disk.source_path
if my_dev is None or active_disk is None:
msg = _('Unable to locate disk matching id: %s') % volume_id
raise exception.NovaException(msg)
LOG.debug(_("found dev, it's %(dev)s, with active disk: %(disk)s"),
{'dev': my_dev, 'disk': active_disk})
if delete_info['merge_target_file'] is None:
# pull via blockRebase()
# Merge the most recent snapshot into the active image
rebase_disk = my_dev
rebase_base = delete_info['file_to_merge']
rebase_bw = 0
rebase_flags = 0
LOG.debug(_('disk: %(disk)s, base: %(base)s, '
'bw: %(bw)s, flags: %(flags)s') %
{'disk': rebase_disk,
'base': rebase_base,
'bw': rebase_bw,
'flags': rebase_flags})
result = virt_dom.blockRebase(rebase_disk, rebase_base,
rebase_bw, rebase_flags)
if result == 0:
LOG.debug(_('blockRebase started successfully'))
while self._wait_for_block_job(virt_dom, rebase_disk,
abort_on_error=True):
LOG.debug(_('waiting for blockRebase job completion'))
time.sleep(0.5)
else:
# commit with blockCommit()
commit_disk = my_dev
commit_base = delete_info['merge_target_file']
commit_top = delete_info['file_to_merge']
bandwidth = 0
flags = 0
result = virt_dom.blockCommit(commit_disk, commit_base, commit_top,
bandwidth, flags)
if result == 0:
LOG.debug(_('blockCommit started successfully'))
while self._wait_for_block_job(virt_dom, commit_disk,
abort_on_error=True):
LOG.debug(_('waiting for blockCommit job completion'))
time.sleep(0.5)
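    # Rough shape of the two paths above:
    #
    #   pull   (merge_target_file is None): blockRebase() streams backing
    #          data into the active image, shortening the chain from the
    #          top.
    #   commit: blockCommit() merges file_to_merge down into
    #          merge_target_file, shortening the chain from the middle.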
def volume_snapshot_delete(self, context, instance, volume_id, snapshot_id,
delete_info=None):
try:
self._volume_snapshot_delete(context, instance, volume_id,
snapshot_id, delete_info=delete_info)
except Exception:
with excutils.save_and_reraise_exception():
msg = _('Error occurred during volume_snapshot_delete, '
'sending error status to Cinder.')
LOG.exception(msg)
self._volume_snapshot_update_status(
context, snapshot_id, 'error_deleting')
self._volume_snapshot_update_status(context, snapshot_id, 'deleting')
def reboot(self, context, instance, network_info, reboot_type='SOFT',
block_device_info=None, bad_volumes_callback=None):
"""Reboot a virtual machine, given an instance reference."""
if reboot_type == 'SOFT':
# NOTE(vish): This will attempt to do a graceful shutdown/restart.
try:
soft_reboot_success = self._soft_reboot(instance)
except libvirt.libvirtError as e:
LOG.debug(_("Instance soft reboot failed: %s"), e)
soft_reboot_success = False
if soft_reboot_success:
LOG.info(_("Instance soft rebooted successfully."),
instance=instance)
return
else:
LOG.warn(_("Failed to soft reboot instance. "
"Trying hard reboot."),
instance=instance)
return self._hard_reboot(context, instance, network_info,
block_device_info)
def _soft_reboot(self, instance):
"""Attempt to shutdown and restart the instance gracefully.
We use shutdown and create here so we can return if the guest
responded and actually rebooted. Note that this method only
        succeeds if the guest responds to ACPI. Therefore we return
success or failure so we can fall back to a hard reboot if
necessary.
:returns: True if the reboot succeeded
"""
dom = self._lookup_by_name(instance["name"])
(state, _max_mem, _mem, _cpus, _t) = dom.info()
state = LIBVIRT_POWER_STATE[state]
old_domid = dom.ID()
# NOTE(vish): This check allows us to reboot an instance that
# is already shutdown.
if state == power_state.RUNNING:
dom.shutdown()
# NOTE(vish): This actually could take slightly longer than the
# FLAG defines depending on how long the get_info
# call takes to return.
self._prepare_pci_devices_for_use(
pci_manager.get_instance_pci_devs(instance))
for x in xrange(CONF.libvirt.wait_soft_reboot_seconds):
dom = self._lookup_by_name(instance["name"])
(state, _max_mem, _mem, _cpus, _t) = dom.info()
state = LIBVIRT_POWER_STATE[state]
new_domid = dom.ID()
# NOTE(ivoks): By checking domain IDs, we make sure we are
# not recreating domain that's already running.
if old_domid != new_domid:
if state in [power_state.SHUTDOWN,
power_state.CRASHED]:
LOG.info(_("Instance shutdown successfully."),
instance=instance)
self._create_domain(domain=dom)
timer = loopingcall.FixedIntervalLoopingCall(
self._wait_for_running, instance)
timer.start(interval=0.5).wait()
return True
else:
LOG.info(_("Instance may have been rebooted during soft "
"reboot, so return now."), instance=instance)
return True
greenthread.sleep(1)
return False
def _hard_reboot(self, context, instance, network_info,
block_device_info=None):
"""Reboot a virtual machine, given an instance reference.
Performs a Libvirt reset (if supported) on the domain.
If Libvirt reset is unavailable this method actually destroys and
re-creates the domain to ensure the reboot happens, as the guest
OS cannot ignore this action.
If xml is set, it uses the passed in xml in place of the xml from the
existing domain.
"""
self._destroy(instance)
# Get the system metadata from the instance
system_meta = utils.instance_sys_meta(instance)
# Convert the system metadata to image metadata
image_meta = utils.get_image_from_system_metadata(system_meta)
if not image_meta:
image_ref = instance.get('image_ref')
service, image_id = glance.get_remote_image_service(context,
image_ref)
image_meta = compute_utils.get_image_metadata(context,
service,
image_id,
instance)
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance,
block_device_info,
image_meta)
# NOTE(vish): This could generate the wrong device_format if we are
# using the raw backend and the images don't exist yet.
# The create_images_and_backing below doesn't properly
# regenerate raw backend images, however, so when it
# does we need to (re)generate the xml after the images
# are in place.
xml = self.to_xml(context, instance, network_info, disk_info,
block_device_info=block_device_info,
write_to_disk=True)
# NOTE (rmk): Re-populate any missing backing files.
disk_info_json = self.get_instance_disk_info(instance['name'], xml,
block_device_info)
instance_dir = libvirt_utils.get_instance_path(instance)
self._create_images_and_backing(context, instance, instance_dir,
disk_info_json)
# Initialize all the necessary networking, block devices and
# start the instance.
self._create_domain_and_network(context, xml, instance, network_info,
block_device_info, reboot=True,
vifs_already_plugged=True)
self._prepare_pci_devices_for_use(
pci_manager.get_instance_pci_devs(instance))
def _wait_for_reboot():
"""Called at an interval until the VM is running again."""
state = self.get_info(instance)['state']
if state == power_state.RUNNING:
LOG.info(_("Instance rebooted successfully."),
instance=instance)
raise loopingcall.LoopingCallDone()
timer = loopingcall.FixedIntervalLoopingCall(_wait_for_reboot)
timer.start(interval=0.5).wait()
def pause(self, instance):
"""Pause VM instance."""
dom = self._lookup_by_name(instance['name'])
dom.suspend()
def unpause(self, instance):
"""Unpause paused VM instance."""
dom = self._lookup_by_name(instance['name'])
dom.resume()
def power_off(self, instance):
"""Power off the specified instance."""
self._destroy(instance)
def power_on(self, context, instance, network_info,
block_device_info=None):
"""Power on the specified instance."""
# We use _hard_reboot here to ensure that all backing files,
# network, and block device connections, etc. are established
# and available before we attempt to start the instance.
self._hard_reboot(context, instance, network_info, block_device_info)
def suspend(self, instance):
"""Suspend the specified instance."""
dom = self._lookup_by_name(instance['name'])
self._detach_pci_devices(dom,
pci_manager.get_instance_pci_devs(instance))
dom.managedSave(0)
def resume(self, context, instance, network_info, block_device_info=None):
"""resume the specified instance."""
xml = self._get_existing_domain_xml(instance, network_info,
block_device_info)
dom = self._create_domain_and_network(context, xml, instance,
network_info, block_device_info=block_device_info,
vifs_already_plugged=True)
self._attach_pci_devices(dom,
pci_manager.get_instance_pci_devs(instance))
def resume_state_on_host_boot(self, context, instance, network_info,
block_device_info=None):
"""resume guest state when a host is booted."""
# Check if the instance is running already and avoid doing
# anything if it is.
if self.instance_exists(instance['name']):
domain = self._lookup_by_name(instance['name'])
state = LIBVIRT_POWER_STATE[domain.info()[0]]
ignored_states = (power_state.RUNNING,
power_state.SUSPENDED,
power_state.NOSTATE,
power_state.PAUSED)
if state in ignored_states:
return
# Instance is not up and could be in an unknown state.
# Be as absolute as possible about getting it back into
# a known and running state.
self._hard_reboot(context, instance, network_info, block_device_info)
def rescue(self, context, instance, network_info, image_meta,
rescue_password):
"""Loads a VM using rescue images.
A rescue is normally performed when something goes wrong with the
primary images and data needs to be corrected/recovered. Rescuing
        should not edit or override the original image, only allow for
data recovery.
"""
instance_dir = libvirt_utils.get_instance_path(instance)
unrescue_xml = self._get_existing_domain_xml(instance, network_info)
unrescue_xml_path = os.path.join(instance_dir, 'unrescue.xml')
libvirt_utils.write_to_file(unrescue_xml_path, unrescue_xml)
rescue_images = {
'image_id': CONF.libvirt.rescue_image_id or instance['image_ref'],
'kernel_id': (CONF.libvirt.rescue_kernel_id or
instance['kernel_id']),
'ramdisk_id': (CONF.libvirt.rescue_ramdisk_id or
instance['ramdisk_id']),
}
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance,
None,
image_meta,
rescue=True)
self._create_image(context, instance,
disk_info['mapping'],
'.rescue', rescue_images,
network_info=network_info,
admin_pass=rescue_password)
xml = self.to_xml(context, instance, network_info, disk_info,
image_meta, rescue=rescue_images,
write_to_disk=True)
self._destroy(instance)
self._create_domain(xml)
def unrescue(self, instance, network_info):
"""Reboot the VM which is being rescued back into primary images.
"""
instance_dir = libvirt_utils.get_instance_path(instance)
unrescue_xml_path = os.path.join(instance_dir, 'unrescue.xml')
xml = libvirt_utils.load_file(unrescue_xml_path)
virt_dom = self._lookup_by_name(instance['name'])
self._destroy(instance)
self._create_domain(xml, virt_dom)
libvirt_utils.file_delete(unrescue_xml_path)
rescue_files = os.path.join(instance_dir, "*.rescue")
for rescue_file in glob.iglob(rescue_files):
libvirt_utils.file_delete(rescue_file)
def poll_rebooting_instances(self, timeout, instances):
pass
def _enable_hairpin(self, xml):
interfaces = self.get_interfaces(xml)
for interface in interfaces:
utils.execute('tee',
'/sys/class/net/%s/brport/hairpin_mode' % interface,
process_input='1',
run_as_root=True,
check_exit_code=[0, 1])
# NOTE(ilyaalekseyev): Implementation like in multinics
# for xenapi(tr3buchet)
def spawn(self, context, instance, image_meta, injected_files,
admin_password, network_info=None, block_device_info=None):
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance,
block_device_info,
image_meta)
self._create_image(context, instance,
disk_info['mapping'],
network_info=network_info,
block_device_info=block_device_info,
files=injected_files,
admin_pass=admin_password)
xml = self.to_xml(context, instance, network_info,
disk_info, image_meta,
block_device_info=block_device_info,
write_to_disk=True)
self._create_domain_and_network(context, xml, instance, network_info,
block_device_info)
LOG.debug(_("Instance is running"), instance=instance)
def _wait_for_boot():
"""Called at an interval until the VM is running."""
state = self.get_info(instance)['state']
if state == power_state.RUNNING:
LOG.info(_("Instance spawned successfully."),
instance=instance)
raise loopingcall.LoopingCallDone()
timer = loopingcall.FixedIntervalLoopingCall(_wait_for_boot)
timer.start(interval=0.5).wait()
def _flush_libvirt_console(self, pty):
out, err = utils.execute('dd',
'if=%s' % pty,
'iflag=nonblock',
run_as_root=True,
check_exit_code=False)
return out
def _append_to_file(self, data, fpath):
LOG.info(_('data: %(data)r, fpath: %(fpath)r'),
{'data': data, 'fpath': fpath})
fp = open(fpath, 'a+')
fp.write(data)
return fpath
def get_console_output(self, context, instance):
virt_dom = self._lookup_by_name(instance.name)
xml = virt_dom.XMLDesc(0)
tree = etree.fromstring(xml)
console_types = {}
# NOTE(comstud): We want to try 'file' types first, then try 'pty'
# types. We can't use Python 2.7 syntax of:
# tree.find("./devices/console[@type='file']/source")
# because we need to support 2.6.
console_nodes = tree.findall('./devices/console')
for console_node in console_nodes:
console_type = console_node.get('type')
console_types.setdefault(console_type, [])
console_types[console_type].append(console_node)
# If the guest has a console logging to a file prefer to use that
if console_types.get('file'):
for file_console in console_types.get('file'):
source_node = file_console.find('./source')
if source_node is None:
continue
path = source_node.get("path")
if not path:
continue
libvirt_utils.chown(path, os.getuid())
with libvirt_utils.file_open(path, 'rb') as fp:
log_data, remaining = utils.last_bytes(fp,
MAX_CONSOLE_BYTES)
if remaining > 0:
LOG.info(_('Truncated console log returned, %d bytes '
'ignored'), remaining, instance=instance)
return log_data
# Try 'pty' types
if console_types.get('pty'):
for pty_console in console_types.get('pty'):
source_node = pty_console.find('./source')
if source_node is None:
continue
pty = source_node.get("path")
if not pty:
continue
break
else:
msg = _("Guest does not have a console available")
raise exception.NovaException(msg)
self._chown_console_log_for_instance(instance)
data = self._flush_libvirt_console(pty)
console_log = self._get_console_log_path(instance)
fpath = self._append_to_file(data, console_log)
with libvirt_utils.file_open(fpath, 'rb') as fp:
log_data, remaining = utils.last_bytes(fp, MAX_CONSOLE_BYTES)
if remaining > 0:
LOG.info(_('Truncated console log returned, %d bytes ignored'),
remaining, instance=instance)
return log_data
@staticmethod
def get_host_ip_addr():
return CONF.my_ip
def get_vnc_console(self, context, instance):
def get_vnc_port_for_instance(instance_name):
virt_dom = self._lookup_by_name(instance_name)
xml = virt_dom.XMLDesc(0)
dom = xmlutils.safe_minidom_parse_string(xml)
for graphic in dom.getElementsByTagName('graphics'):
if graphic.getAttribute('type') == 'vnc':
return graphic.getAttribute('port')
# NOTE(rmk): We had VNC consoles enabled but the instance in
# question is not actually listening for connections.
raise exception.ConsoleTypeUnavailable(console_type='vnc')
port = get_vnc_port_for_instance(instance.name)
host = CONF.vncserver_proxyclient_address
return {'host': host, 'port': port, 'internal_access_path': None}
def get_spice_console(self, context, instance):
def get_spice_ports_for_instance(instance_name):
virt_dom = self._lookup_by_name(instance_name)
xml = virt_dom.XMLDesc(0)
# TODO(sleepsonthefloor): use etree instead of minidom
dom = xmlutils.safe_minidom_parse_string(xml)
for graphic in dom.getElementsByTagName('graphics'):
if graphic.getAttribute('type') == 'spice':
return (graphic.getAttribute('port'),
graphic.getAttribute('tlsPort'))
# NOTE(rmk): We had Spice consoles enabled but the instance in
# question is not actually listening for connections.
raise exception.ConsoleTypeUnavailable(console_type='spice')
ports = get_spice_ports_for_instance(instance['name'])
host = CONF.spice.server_proxyclient_address
return {'host': host, 'port': ports[0],
'tlsPort': ports[1], 'internal_access_path': None}
@staticmethod
def _supports_direct_io(dirpath):
if not hasattr(os, 'O_DIRECT'):
LOG.debug(_("This python runtime does not support direct I/O"))
return False
testfile = os.path.join(dirpath, ".directio.test")
hasDirectIO = True
try:
f = os.open(testfile, os.O_CREAT | os.O_WRONLY | os.O_DIRECT)
            # Check if the write is allowed with 512 byte alignment
align_size = 512
m = mmap.mmap(-1, align_size)
m.write(r"x" * align_size)
os.write(f, m)
os.close(f)
LOG.debug(_("Path '%(path)s' supports direct I/O") %
{'path': dirpath})
except OSError as e:
if e.errno == errno.EINVAL:
LOG.debug(_("Path '%(path)s' does not support direct I/O: "
"'%(ex)s'") % {'path': dirpath, 'ex': str(e)})
hasDirectIO = False
else:
with excutils.save_and_reraise_exception():
LOG.error(_("Error on '%(path)s' while checking "
"direct I/O: '%(ex)s'") %
{'path': dirpath, 'ex': str(e)})
except Exception as e:
with excutils.save_and_reraise_exception():
LOG.error(_("Error on '%(path)s' while checking direct I/O: "
"'%(ex)s'") % {'path': dirpath, 'ex': str(e)})
finally:
try:
os.unlink(testfile)
except Exception:
pass
return hasDirectIO
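    # Usage sketch (hypothetical): elsewhere in this driver the disk cache
    # mode choice boils down to something like
    #
    #   if self._supports_direct_io(CONF.instances_path):
    #       cachemode = 'none'          # O_DIRECT is safe here
    #   else:
    #       cachemode = 'writethrough'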
@staticmethod
def _create_local(target, local_size, unit='G',
fs_format=None, label=None):
"""Create a blank image of specified size."""
libvirt_utils.create_image('raw', target,
'%d%c' % (local_size, unit))
def _create_ephemeral(self, target, ephemeral_size,
fs_label, os_type, is_block_dev=False,
max_size=None):
if not is_block_dev:
self._create_local(target, ephemeral_size)
# Run as root only for block devices.
disk.mkfs(os_type, fs_label, target, run_as_root=is_block_dev)
@staticmethod
def _create_swap(target, swap_mb, max_size=None):
"""Create a swap file of specified size."""
libvirt_utils.create_image('raw', target, '%dM' % swap_mb)
utils.mkfs('swap', target)
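    # e.g. _create_swap('/path/to/disk.swap', 1024) creates a 1024M raw
    # image and formats it as swap (path is illustrative).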
@staticmethod
def _get_console_log_path(instance):
return os.path.join(libvirt_utils.get_instance_path(instance),
'console.log')
@staticmethod
def _get_disk_config_path(instance):
return os.path.join(libvirt_utils.get_instance_path(instance),
'disk.config')
def _chown_console_log_for_instance(self, instance):
console_log = self._get_console_log_path(instance)
if os.path.exists(console_log):
libvirt_utils.chown(console_log, os.getuid())
def _chown_disk_config_for_instance(self, instance):
disk_config = self._get_disk_config_path(instance)
if os.path.exists(disk_config):
libvirt_utils.chown(disk_config, os.getuid())
@staticmethod
def _is_booted_from_volume(instance, disk_mapping):
"""Determines whether the VM is booting from volume
Determines whether the disk mapping indicates that the VM
is booting from a volume.
"""
return ((not bool(instance.get('image_ref')))
or 'disk' not in disk_mapping)
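    # Quick truth table for the predicate above (illustrative inputs):
    #
    #   {'image_ref': ''},    {'disk': ...}  -> True   (no image)
    #   {'image_ref': 'abc'}, {}             -> True   (no root disk)
    #   {'image_ref': 'abc'}, {'disk': ...}  -> False  (image-backed)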
def _inject_data(self, instance, network_info, admin_pass, files, suffix):
"""Injects data in an disk image
Helper used for injecting data in a disk image file system.
Keyword arguments:
instance -- a dict that refers instance specifications
network_info -- a dict that refers network speficications
admin_pass -- a string used to set an admin password
files -- a list of files needs to be injected
suffix -- a string used as a image name suffix
"""
        # Handle which partition needs to be used.
target_partition = None
if not instance['kernel_id']:
target_partition = CONF.libvirt.inject_partition
if target_partition == 0:
target_partition = None
if CONF.libvirt.virt_type == 'lxc':
target_partition = None
# Handles the key injection.
if CONF.libvirt.inject_key and instance.get('key_data'):
key = str(instance['key_data'])
else:
key = None
# Handles the admin password injection.
if not CONF.libvirt.inject_password:
admin_pass = None
# Handles the network injection.
net = netutils.get_injected_network_template(network_info)
# Handles the metadata injection
metadata = instance.get('metadata')
image_type = CONF.libvirt.images_type
if any((key, net, metadata, admin_pass, files)):
injection_path = self.image_backend.image(
instance,
'disk' + suffix,
image_type).path
img_id = instance['image_ref']
try:
disk.inject_data(injection_path,
key, net, metadata, admin_pass, files,
partition=target_partition,
use_cow=CONF.use_cow_images,
mandatory=('files',))
except Exception as e:
with excutils.save_and_reraise_exception():
LOG.error(_('Error injecting data into image '
'%(img_id)s (%(e)s)'),
{'img_id': img_id, 'e': e},
instance=instance)
def _create_image(self, context, instance,
disk_mapping, suffix='',
disk_images=None, network_info=None,
block_device_info=None, files=None,
admin_pass=None, inject_files=True):
if not suffix:
suffix = ''
booted_from_volume = self._is_booted_from_volume(
instance, disk_mapping)
def image(fname, image_type=CONF.libvirt.images_type):
return self.image_backend.image(instance,
fname + suffix, image_type)
def raw(fname):
return image(fname, image_type='raw')
# ensure directories exist and are writable
fileutils.ensure_tree(libvirt_utils.get_instance_path(instance))
LOG.info(_('Creating image'), instance=instance)
# NOTE(dprince): for rescue console.log may already exist... chown it.
self._chown_console_log_for_instance(instance)
# NOTE(yaguang): For evacuate disk.config already exist in shared
# storage, chown it.
self._chown_disk_config_for_instance(instance)
        # NOTE(vish): No need to add the suffix to console.log
libvirt_utils.write_to_file(
self._get_console_log_path(instance), '', 7)
if not disk_images:
disk_images = {'image_id': instance['image_ref'],
'kernel_id': instance['kernel_id'],
'ramdisk_id': instance['ramdisk_id']}
if disk_images['kernel_id']:
fname = imagecache.get_cache_fname(disk_images, 'kernel_id')
raw('kernel').cache(fetch_func=libvirt_utils.fetch_image,
context=context,
filename=fname,
image_id=disk_images['kernel_id'],
user_id=instance['user_id'],
project_id=instance['project_id'])
if disk_images['ramdisk_id']:
fname = imagecache.get_cache_fname(disk_images, 'ramdisk_id')
raw('ramdisk').cache(fetch_func=libvirt_utils.fetch_image,
context=context,
filename=fname,
image_id=disk_images['ramdisk_id'],
user_id=instance['user_id'],
project_id=instance['project_id'])
inst_type = flavors.extract_flavor(instance)
# NOTE(ndipanov): Even if disk_mapping was passed in, which
# currently happens only on rescue - we still don't want to
# create a base image.
if not booted_from_volume:
root_fname = imagecache.get_cache_fname(disk_images, 'image_id')
size = instance['root_gb'] * units.Gi
if size == 0 or suffix == '.rescue':
size = None
image('disk').cache(fetch_func=libvirt_utils.fetch_image,
context=context,
filename=root_fname,
size=size,
image_id=disk_images['image_id'],
user_id=instance['user_id'],
project_id=instance['project_id'])
# Lookup the filesystem type if required
os_type_with_default = disk.get_fs_type_for_os_type(
instance['os_type'])
ephemeral_gb = instance['ephemeral_gb']
if 'disk.local' in disk_mapping:
disk_image = image('disk.local')
fn = functools.partial(self._create_ephemeral,
fs_label='ephemeral0',
os_type=instance["os_type"],
is_block_dev=disk_image.is_block_dev)
fname = "ephemeral_%s_%s" % (ephemeral_gb, os_type_with_default)
size = ephemeral_gb * units.Gi
disk_image.cache(fetch_func=fn,
filename=fname,
size=size,
ephemeral_size=ephemeral_gb)
for idx, eph in enumerate(driver.block_device_info_get_ephemerals(
block_device_info)):
disk_image = image(blockinfo.get_eph_disk(idx))
fn = functools.partial(self._create_ephemeral,
fs_label='ephemeral%d' % idx,
os_type=instance["os_type"],
is_block_dev=disk_image.is_block_dev)
size = eph['size'] * units.Gi
fname = "ephemeral_%s_%s" % (eph['size'], os_type_with_default)
disk_image.cache(
fetch_func=fn,
filename=fname,
size=size,
ephemeral_size=eph['size'])
if 'disk.swap' in disk_mapping:
mapping = disk_mapping['disk.swap']
swap_mb = 0
swap = driver.block_device_info_get_swap(block_device_info)
if driver.swap_is_usable(swap):
swap_mb = swap['swap_size']
elif (inst_type['swap'] > 0 and
not block_device.volume_in_mapping(
mapping['dev'], block_device_info)):
swap_mb = inst_type['swap']
if swap_mb > 0:
size = swap_mb * units.Mi
image('disk.swap').cache(fetch_func=self._create_swap,
filename="swap_%s" % swap_mb,
size=size,
swap_mb=swap_mb)
# Config drive
if configdrive.required_by(instance):
LOG.info(_('Using config drive'), instance=instance)
extra_md = {}
if admin_pass:
extra_md['admin_pass'] = admin_pass
inst_md = instance_metadata.InstanceMetadata(instance,
content=files, extra_md=extra_md, network_info=network_info)
with configdrive.ConfigDriveBuilder(instance_md=inst_md) as cdb:
configdrive_path = self._get_disk_config_path(instance)
LOG.info(_('Creating config drive at %(path)s'),
{'path': configdrive_path}, instance=instance)
try:
cdb.make_drive(configdrive_path)
except processutils.ProcessExecutionError as e:
with excutils.save_and_reraise_exception():
LOG.error(_('Creating config drive failed '
'with error: %s'),
e, instance=instance)
# File injection only if needed
elif inject_files and CONF.libvirt.inject_partition != -2:
if booted_from_volume:
LOG.warn(_('File injection into a boot from volume '
'instance is not supported'), instance=instance)
self._inject_data(
instance, network_info, admin_pass, files, suffix)
if CONF.libvirt.virt_type == 'uml':
libvirt_utils.chown(image('disk').path, 'root')
def _prepare_pci_devices_for_use(self, pci_devices):
        # kvm and qemu support managed mode
# In managed mode, the configured device will be automatically
# detached from the host OS drivers when the guest is started,
# and then re-attached when the guest shuts down.
if CONF.libvirt.virt_type != 'xen':
# we do manual detach only for xen
return
try:
for dev in pci_devices:
libvirt_dev_addr = dev['hypervisor_name']
libvirt_dev = \
self._conn.nodeDeviceLookupByName(libvirt_dev_addr)
# Note(yjiang5) Spelling for 'dettach' is correct, see
# http://libvirt.org/html/libvirt-libvirt.html.
libvirt_dev.dettach()
# Note(yjiang5): A reset of one PCI device may impact other
# devices on the same bus, thus we need two separated loops
# to detach and then reset it.
for dev in pci_devices:
libvirt_dev_addr = dev['hypervisor_name']
libvirt_dev = \
self._conn.nodeDeviceLookupByName(libvirt_dev_addr)
libvirt_dev.reset()
except libvirt.libvirtError as exc:
raise exception.PciDevicePrepareFailed(id=dev['id'],
instance_uuid=
dev['instance_uuid'],
reason=str(exc))
def _detach_pci_devices(self, dom, pci_devs):
        # For libvirt versions < 1.1.1 device detach is racy, so forbid
        # detaching PCI devices unless we have at least that version.
if not self.has_min_version(MIN_LIBVIRT_DEVICE_CALLBACK_VERSION):
if pci_devs:
reason = (_("Detaching PCI devices with libvirt < %(ver)s"
" is not permitted") %
{'ver': MIN_LIBVIRT_DEVICE_CALLBACK_VERSION})
raise exception.PciDeviceDetachFailed(reason=reason,
dev=pci_devs)
try:
for dev in pci_devs:
dom.detachDeviceFlags(self.get_guest_pci_device(dev).to_xml(),
libvirt.VIR_DOMAIN_AFFECT_LIVE)
# after detachDeviceFlags returned, we should check the dom to
# ensure the detaching is finished
xml = dom.XMLDesc(0)
xml_doc = etree.fromstring(xml)
guest_config = vconfig.LibvirtConfigGuest()
guest_config.parse_dom(xml_doc)
for hdev in [d for d in guest_config.devices
if d.type == 'pci']:
hdbsf = [hdev.domain, hdev.bus, hdev.slot, hdev.function]
dbsf = pci_utils.parse_address(dev['address'])
if [int(x, 16) for x in hdbsf] ==\
[int(x, 16) for x in dbsf]:
raise exception.PciDeviceDetachFailed(reason=
"timeout",
dev=dev)
except libvirt.libvirtError as ex:
error_code = ex.get_error_code()
if error_code == libvirt.VIR_ERR_NO_DOMAIN:
LOG.warn(_("Instance disappeared while detaching "
"a PCI device from it."))
else:
raise
def _attach_pci_devices(self, dom, pci_devs):
try:
for dev in pci_devs:
dom.attachDevice(self.get_guest_pci_device(dev).to_xml())
except libvirt.libvirtError:
LOG.error(_('Attaching PCI devices %(dev)s to %(dom)s failed.')
% {'dev': pci_devs, 'dom': dom.ID()})
raise
def _set_host_enabled(self, enabled,
disable_reason=DISABLE_REASON_UNDEFINED):
"""Enables / Disables the compute service on this host.
This doesn't override non-automatic disablement with an automatic
setting; thereby permitting operators to keep otherwise
healthy hosts out of rotation.
"""
status_name = {True: 'disabled',
False: 'enabled'}
disable_service = not enabled
ctx = nova_context.get_admin_context()
try:
service = service_obj.Service.get_by_compute_host(ctx, CONF.host)
if service.disabled != disable_service:
# Note(jang): this is a quick fix to stop operator-
# disabled compute hosts from re-enabling themselves
# automatically. We prefix any automatic reason code
# with a fixed string. We only re-enable a host
# automatically if we find that string in place.
# This should probably be replaced with a separate flag.
if not service.disabled or (
service.disabled_reason and
service.disabled_reason.startswith(DISABLE_PREFIX)):
service.disabled = disable_service
service.disabled_reason = (
DISABLE_PREFIX + disable_reason
if disable_service else DISABLE_REASON_UNDEFINED)
service.save()
LOG.debug(_('Updating compute service status to %s'),
status_name[disable_service])
else:
LOG.debug(_('Not overriding manual compute service '
'status with: %s'),
status_name[disable_service])
except exception.ComputeHostNotFound:
            LOG.warn(_('Cannot update service status on host: %s, '
                       'since it is not registered.') % CONF.host)
except Exception:
            LOG.warn(_('Cannot update service status on host: %s, '
                       'due to an unexpected exception.') % CONF.host,
exc_info=True)
def get_host_capabilities(self):
"""Returns an instance of config.LibvirtConfigCaps representing
the capabilities of the host.
"""
if not self._caps:
xmlstr = self._conn.getCapabilities()
self._caps = vconfig.LibvirtConfigCaps()
self._caps.parse_str(xmlstr)
if hasattr(libvirt, 'VIR_CONNECT_BASELINE_CPU_EXPAND_FEATURES'):
try:
features = self._conn.baselineCPU(
[self._caps.host.cpu.to_xml()],
libvirt.VIR_CONNECT_BASELINE_CPU_EXPAND_FEATURES)
# FIXME(wangpan): the return value of baselineCPU should be
# None or xml string, but libvirt has a bug
# of it from 1.1.2 which is fixed in 1.2.0,
# this -1 checking should be removed later.
if features and features != -1:
self._caps.host.cpu.parse_str(features)
            except libvirt.libvirtError as ex:
                # Note(yjiang5): ignore if libvirt has no support for
                # baseline CPU expansion. VIR_ERR_NO_SUPPORT is an error
                # code, not an exception class, so it cannot be caught
                # directly.
                if ex.get_error_code() != libvirt.VIR_ERR_NO_SUPPORT:
                    raise
return self._caps
def get_host_uuid(self):
"""Returns a UUID representing the host."""
caps = self.get_host_capabilities()
return caps.host.uuid
def get_host_cpu_for_guest(self):
"""Returns an instance of config.LibvirtConfigGuestCPU
representing the host's CPU model & topology with
policy for configuring a guest to match
"""
caps = self.get_host_capabilities()
hostcpu = caps.host.cpu
guestcpu = vconfig.LibvirtConfigGuestCPU()
guestcpu.model = hostcpu.model
guestcpu.vendor = hostcpu.vendor
guestcpu.arch = hostcpu.arch
guestcpu.match = "exact"
for hostfeat in hostcpu.features:
guestfeat = vconfig.LibvirtConfigGuestCPUFeature(hostfeat.name)
guestfeat.policy = "require"
guestcpu.features.append(guestfeat)
return guestcpu
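    # The guest CPU built above serializes to XML along these lines
    # (model/vendor/feature names are illustrative):
    #
    #   <cpu match='exact'>
    #     <model>SandyBridge</model>
    #     <vendor>Intel</vendor>
    #     <feature policy='require' name='avx'/>
    #   </cpu>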
def get_guest_cpu_config(self):
mode = CONF.libvirt.cpu_mode
model = CONF.libvirt.cpu_model
if mode is None:
if ((CONF.libvirt.virt_type == "kvm" or
CONF.libvirt.virt_type == "qemu")):
mode = "host-model"
else:
mode = "none"
if mode == "none":
return None
if ((CONF.libvirt.virt_type != "kvm" and
CONF.libvirt.virt_type != "qemu")):
msg = _("Config requested an explicit CPU model, but "
"the current libvirt hypervisor '%s' does not "
"support selecting CPU models") % CONF.libvirt.virt_type
raise exception.Invalid(msg)
if mode == "custom" and model is None:
msg = _("Config requested a custom CPU model, but no "
"model name was provided")
raise exception.Invalid(msg)
elif mode != "custom" and model is not None:
msg = _("A CPU model name should not be set when a "
"host CPU model is requested")
raise exception.Invalid(msg)
LOG.debug(_("CPU mode '%(mode)s' model '%(model)s' was chosen")
% {'mode': mode, 'model': (model or "")})
# TODO(berrange): in the future, when MIN_LIBVIRT_VERSION is
# updated to be at least this new, we can kill off the elif
# blocks here
if self.has_min_version(MIN_LIBVIRT_HOST_CPU_VERSION):
cpu = vconfig.LibvirtConfigGuestCPU()
cpu.mode = mode
cpu.model = model
elif mode == "custom":
cpu = vconfig.LibvirtConfigGuestCPU()
cpu.model = model
elif mode == "host-model":
cpu = self.get_host_cpu_for_guest()
elif mode == "host-passthrough":
msg = _("Passthrough of the host CPU was requested but "
"this libvirt version does not support this feature")
raise exception.NovaException(msg)
return cpu
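    # Decision summary for get_guest_cpu_config():
    #
    #   cpu_mode unset, virt_type kvm/qemu -> "host-model"
    #   cpu_mode unset, other virt_type    -> "none" (returns None)
    #   "custom" requires cpu_model to be set; any other mode forbids it.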
def get_guest_disk_config(self, instance, name, disk_mapping, inst_type,
image_type=None):
image = self.image_backend.image(instance,
name,
image_type)
disk_info = disk_mapping[name]
return image.libvirt_info(disk_info['bus'],
disk_info['dev'],
disk_info['type'],
self.disk_cachemode,
inst_type['extra_specs'],
self.get_hypervisor_version())
def get_guest_storage_config(self, instance, image_meta,
disk_info,
rescue, block_device_info,
inst_type):
devices = []
disk_mapping = disk_info['mapping']
block_device_mapping = driver.block_device_info_get_mapping(
block_device_info)
if CONF.libvirt.virt_type == "lxc":
fs = vconfig.LibvirtConfigGuestFilesys()
fs.source_type = "mount"
fs.source_dir = os.path.join(
libvirt_utils.get_instance_path(instance), 'rootfs')
devices.append(fs)
else:
if rescue:
diskrescue = self.get_guest_disk_config(instance,
'disk.rescue',
disk_mapping,
inst_type)
devices.append(diskrescue)
diskos = self.get_guest_disk_config(instance,
'disk',
disk_mapping,
inst_type)
devices.append(diskos)
else:
if 'disk' in disk_mapping:
diskos = self.get_guest_disk_config(instance,
'disk',
disk_mapping,
inst_type)
devices.append(diskos)
if 'disk.local' in disk_mapping:
disklocal = self.get_guest_disk_config(instance,
'disk.local',
disk_mapping,
inst_type)
devices.append(disklocal)
self.virtapi.instance_update(
nova_context.get_admin_context(), instance['uuid'],
{'default_ephemeral_device':
block_device.prepend_dev(disklocal.target_dev)})
for idx, eph in enumerate(
driver.block_device_info_get_ephemerals(
block_device_info)):
diskeph = self.get_guest_disk_config(
instance,
blockinfo.get_eph_disk(idx),
disk_mapping, inst_type)
devices.append(diskeph)
if 'disk.swap' in disk_mapping:
diskswap = self.get_guest_disk_config(instance,
'disk.swap',
disk_mapping,
inst_type)
devices.append(diskswap)
self.virtapi.instance_update(
nova_context.get_admin_context(), instance['uuid'],
{'default_swap_device': block_device.prepend_dev(
diskswap.target_dev)})
for vol in block_device_mapping:
connection_info = vol['connection_info']
vol_dev = block_device.prepend_dev(vol['mount_device'])
info = disk_mapping[vol_dev]
cfg = self.volume_driver_method('connect_volume',
connection_info,
info)
devices.append(cfg)
if 'disk.config' in disk_mapping:
diskconfig = self.get_guest_disk_config(instance,
'disk.config',
disk_mapping,
inst_type,
'raw')
devices.append(diskconfig)
for d in devices:
self.set_cache_mode(d)
if (image_meta and
image_meta.get('properties', {}).get('hw_scsi_model')):
hw_scsi_model = image_meta['properties']['hw_scsi_model']
scsi_controller = vconfig.LibvirtConfigGuestController()
scsi_controller.type = 'scsi'
scsi_controller.model = hw_scsi_model
devices.append(scsi_controller)
return devices
def get_guest_config_sysinfo(self, instance):
sysinfo = vconfig.LibvirtConfigGuestSysinfo()
sysinfo.system_manufacturer = version.vendor_string()
sysinfo.system_product = version.product_string()
sysinfo.system_version = version.version_string_with_package()
sysinfo.system_serial = self.get_host_uuid()
sysinfo.system_uuid = instance['uuid']
return sysinfo
def get_guest_pci_device(self, pci_device):
dbsf = pci_utils.parse_address(pci_device['address'])
dev = vconfig.LibvirtConfigGuestHostdevPCI()
dev.domain, dev.bus, dev.slot, dev.function = dbsf
        # only kvm and qemu support managed mode
if CONF.libvirt.virt_type in ('xen',):
dev.managed = 'no'
if CONF.libvirt.virt_type in ('kvm', 'qemu'):
dev.managed = 'yes'
return dev
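    # Illustrative serialized form for a device at '0000:06:00.1' under
    # kvm/qemu (managed mode):
    #
    #   <hostdev mode='subsystem' type='pci' managed='yes'>
    #     <source>
    #       <address domain='0x0000' bus='0x06' slot='0x00'
    #                function='0x1'/>
    #     </source>
    #   </hostdev>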
def get_guest_config(self, instance, network_info, image_meta,
disk_info, rescue=None, block_device_info=None):
"""Get config data for parameters.
:param rescue: optional dictionary that should contain the key
'ramdisk_id' if a ramdisk is needed for the rescue image and
'kernel_id' if a kernel is needed for the rescue image.
"""
flavor = flavor_obj.Flavor.get_by_id(
nova_context.get_admin_context(read_deleted='yes'),
instance['instance_type_id'])
inst_path = libvirt_utils.get_instance_path(instance)
disk_mapping = disk_info['mapping']
img_meta_prop = image_meta.get('properties', {}) if image_meta else {}
CONSOLE = "console=tty0 console=ttyS0"
guest = vconfig.LibvirtConfigGuest()
guest.virt_type = CONF.libvirt.virt_type
guest.name = instance['name']
guest.uuid = instance['uuid']
# We are using default unit for memory: KiB
guest.memory = flavor.memory_mb * units.Ki
guest.vcpus = flavor.vcpus
guest.cpuset = CONF.vcpu_pin_set
quota_items = ['cpu_shares', 'cpu_period', 'cpu_quota']
for key, value in flavor.extra_specs.iteritems():
scope = key.split(':')
if len(scope) > 1 and scope[0] == 'quota':
if scope[1] in quota_items:
setattr(guest, scope[1], value)
guest.cpu = self.get_guest_cpu_config()
if 'root' in disk_mapping:
root_device_name = block_device.prepend_dev(
disk_mapping['root']['dev'])
else:
root_device_name = None
if root_device_name:
# NOTE(yamahata):
# for nova.api.ec2.cloud.CloudController.get_metadata()
self.virtapi.instance_update(
nova_context.get_admin_context(), instance['uuid'],
{'root_device_name': root_device_name})
guest.os_type = vm_mode.get_from_instance(instance)
if guest.os_type is None:
if CONF.libvirt.virt_type == "lxc":
guest.os_type = vm_mode.EXE
elif CONF.libvirt.virt_type == "uml":
guest.os_type = vm_mode.UML
elif CONF.libvirt.virt_type == "xen":
guest.os_type = vm_mode.XEN
else:
guest.os_type = vm_mode.HVM
if CONF.libvirt.virt_type == "xen" and guest.os_type == vm_mode.HVM:
guest.os_loader = CONF.libvirt.xen_hvmloader_path
if CONF.libvirt.virt_type in ("kvm", "qemu"):
caps = self.get_host_capabilities()
if caps.host.cpu.arch in ("i686", "x86_64"):
guest.sysinfo = self.get_guest_config_sysinfo(instance)
guest.os_smbios = vconfig.LibvirtConfigGuestSMBIOS()
# The underlying machine type can be set as an image attribute,
# or otherwise based on some architecture specific defaults
if (image_meta is not None and image_meta.get('properties') and
image_meta['properties'].get('hw_machine_type')
is not None):
guest.os_mach_type = \
image_meta['properties']['hw_machine_type']
else:
# For ARM systems we will default to vexpress-a15 for armv7
# and virt for aarch64
if caps.host.cpu.arch == "armv7l":
guest.os_mach_type = "vexpress-a15"
if caps.host.cpu.arch == "aarch64":
guest.os_mach_type = "virt"
if CONF.libvirt.virt_type == "lxc":
guest.os_init_path = "/sbin/init"
guest.os_cmdline = CONSOLE
elif CONF.libvirt.virt_type == "uml":
guest.os_kernel = "/usr/bin/linux"
guest.os_root = root_device_name
else:
if rescue:
if rescue.get('kernel_id'):
guest.os_kernel = os.path.join(inst_path, "kernel.rescue")
if CONF.libvirt.virt_type == "xen":
guest.os_cmdline = "ro root=%s" % root_device_name
else:
guest.os_cmdline = ("root=%s %s" % (root_device_name,
CONSOLE))
if rescue.get('ramdisk_id'):
guest.os_initrd = os.path.join(inst_path, "ramdisk.rescue")
elif instance['kernel_id']:
guest.os_kernel = os.path.join(inst_path, "kernel")
if CONF.libvirt.virt_type == "xen":
guest.os_cmdline = "ro root=%s" % root_device_name
else:
guest.os_cmdline = ("root=%s %s" % (root_device_name,
CONSOLE))
if instance['ramdisk_id']:
guest.os_initrd = os.path.join(inst_path, "ramdisk")
else:
guest.os_boot_dev = blockinfo.get_boot_order(disk_info)
if (image_meta and
image_meta.get('properties', {}).get('os_command_line')):
guest.os_cmdline = \
image_meta['properties'].get('os_command_line')
if ((CONF.libvirt.virt_type != "lxc" and
CONF.libvirt.virt_type != "uml")):
guest.acpi = True
guest.apic = True
# NOTE(mikal): Microsoft Windows expects the clock to be in
# "localtime". If the clock is set to UTC, then you can use a
# registry key to let windows know, but Microsoft says this is
# buggy in http://support.microsoft.com/kb/2687252
clk = vconfig.LibvirtConfigGuestClock()
if instance['os_type'] == 'windows':
LOG.info(_('Configuring timezone for windows instance to '
'localtime'), instance=instance)
clk.offset = 'localtime'
else:
clk.offset = 'utc'
guest.set_clock(clk)
if CONF.libvirt.virt_type == "kvm":
# TODO(berrange) One day this should be per-guest
# OS type configurable
tmpit = vconfig.LibvirtConfigGuestTimer()
tmpit.name = "pit"
tmpit.tickpolicy = "delay"
tmrtc = vconfig.LibvirtConfigGuestTimer()
tmrtc.name = "rtc"
tmrtc.tickpolicy = "catchup"
clk.add_timer(tmpit)
clk.add_timer(tmrtc)
arch = libvirt_utils.get_arch(image_meta)
if arch in ("i686", "x86_64"):
# NOTE(rfolco): HPET is a hardware timer for x86 arch.
# qemu -no-hpet is not supported on non-x86 targets.
tmhpet = vconfig.LibvirtConfigGuestTimer()
tmhpet.name = "hpet"
tmhpet.present = False
clk.add_timer(tmhpet)
for cfg in self.get_guest_storage_config(instance,
image_meta,
disk_info,
rescue,
block_device_info,
flavor):
guest.add_device(cfg)
for vif in network_info:
cfg = self.vif_driver.get_config(instance,
vif,
image_meta,
flavor)
guest.add_device(cfg)
if ((CONF.libvirt.virt_type == "qemu" or
CONF.libvirt.virt_type == "kvm")):
# The QEMU 'pty' driver throws away any data if no
# client app is connected. Thus we can't get away
# with a single type=pty console. Instead we have
# to configure two separate consoles.
consolelog = vconfig.LibvirtConfigGuestSerial()
consolelog.type = "file"
consolelog.source_path = self._get_console_log_path(instance)
guest.add_device(consolelog)
consolepty = vconfig.LibvirtConfigGuestSerial()
consolepty.type = "pty"
guest.add_device(consolepty)
else:
consolepty = vconfig.LibvirtConfigGuestConsole()
consolepty.type = "pty"
guest.add_device(consolepty)
# We want a tablet if VNC is enabled,
# or SPICE is enabled and the SPICE agent is disabled
# NB: this implies that if both SPICE + VNC are enabled
# at the same time, we'll get the tablet whether the
# SPICE agent is used or not.
need_usb_tablet = False
if CONF.vnc_enabled:
need_usb_tablet = CONF.libvirt.use_usb_tablet
elif CONF.spice.enabled and not CONF.spice.agent_enabled:
need_usb_tablet = CONF.libvirt.use_usb_tablet
if need_usb_tablet and guest.os_type == vm_mode.HVM:
tablet = vconfig.LibvirtConfigGuestInput()
tablet.type = "tablet"
tablet.bus = "usb"
guest.add_device(tablet)
if CONF.spice.enabled and CONF.spice.agent_enabled and \
CONF.libvirt.virt_type not in ('lxc', 'uml', 'xen'):
channel = vconfig.LibvirtConfigGuestChannel()
channel.target_name = "com.redhat.spice.0"
guest.add_device(channel)
# NB some versions of libvirt support both SPICE and VNC
# at the same time. We're not trying to second guess which
# those versions are. We'll just let libvirt report the
# errors appropriately if the user enables both.
add_video_driver = False
if ((CONF.vnc_enabled and
CONF.libvirt.virt_type not in ('lxc', 'uml'))):
graphics = vconfig.LibvirtConfigGuestGraphics()
graphics.type = "vnc"
graphics.keymap = CONF.vnc_keymap
graphics.listen = CONF.vncserver_listen
guest.add_device(graphics)
add_video_driver = True
if CONF.spice.enabled and \
CONF.libvirt.virt_type not in ('lxc', 'uml', 'xen'):
graphics = vconfig.LibvirtConfigGuestGraphics()
graphics.type = "spice"
graphics.keymap = CONF.spice.keymap
graphics.listen = CONF.spice.server_listen
guest.add_device(graphics)
add_video_driver = True
if add_video_driver:
VALID_VIDEO_DEVICES = ("vga", "cirrus", "vmvga", "xen", "qxl")
video = vconfig.LibvirtConfigGuestVideo()
# NOTE(ldbragst): The following logic sets the video.type
# depending on supported defaults given the architecture,
# virtualization type, and features. The video.type attribute can
# be overridden by the user with image_meta['properties'], which
# is carried out in the next if statement below this one.
arch = libvirt_utils.get_arch(image_meta)
if guest.os_type == vm_mode.XEN:
video.type = 'xen'
elif arch in ('ppc', 'ppc64'):
                # NOTE(ldbragst): PowerKVM doesn't support 'cirrus' by default
# so use 'vga' instead when running on Power hardware.
video.type = 'vga'
elif CONF.spice.enabled:
video.type = 'qxl'
if img_meta_prop.get('hw_video_model'):
video.type = img_meta_prop.get('hw_video_model')
if (video.type not in VALID_VIDEO_DEVICES):
raise exception.InvalidVideoMode(model=video.type)
# Set video memory, only if the flavor's limit is set
video_ram = int(img_meta_prop.get('hw_video_ram', 0))
max_vram = int(flavor.extra_specs
.get('hw_video:ram_max_mb', 0))
if video_ram > max_vram:
raise exception.RequestedVRamTooHigh(req_vram=video_ram,
max_vram=max_vram)
if max_vram and video_ram:
video.vram = video_ram
guest.add_device(video)
        # The Qemu guest agent is only supported on the 'qemu' and 'kvm'
        # hypervisors
        if CONF.libvirt.virt_type in ('qemu', 'kvm'):
            qga_enabled = False
            # Enable qga only if the 'hw_qemu_guest_agent' property is 'yes'
hw_qga = img_meta_prop.get('hw_qemu_guest_agent', 'no')
if hw_qga.lower() == 'yes':
LOG.debug(_("Qemu guest agent is enabled through image "
"metadata"), instance=instance)
qga_enabled = True
if qga_enabled:
qga = vconfig.LibvirtConfigGuestChannel()
qga.type = "unix"
qga.target_name = "org.qemu.guest_agent.0"
qga.source_path = ("/var/lib/libvirt/qemu/%s.%s.sock" %
("org.qemu.guest_agent.0", instance['name']))
guest.add_device(qga)
if (img_meta_prop.get('hw_rng_model') == 'virtio' and
flavor.extra_specs.get('hw_rng:allowed',
'').lower() == 'true'):
rng_device = vconfig.LibvirtConfigGuestRng()
rate_bytes = flavor.extra_specs.get('hw_rng:rate_bytes', 0)
period = flavor.extra_specs.get('hw_rng:rate_period', 0)
if rate_bytes:
rng_device.rate_bytes = int(rate_bytes)
rng_device.rate_period = int(period)
if (CONF.libvirt.rng_dev_path and
not os.path.exists(CONF.libvirt.rng_dev_path)):
raise exception.RngDeviceNotExist(
path=CONF.libvirt.rng_dev_path)
rng_device.backend = CONF.libvirt.rng_dev_path
guest.add_device(rng_device)
if CONF.libvirt.virt_type in ('xen', 'qemu', 'kvm'):
for pci_dev in pci_manager.get_instance_pci_devs(instance):
guest.add_device(self.get_guest_pci_device(pci_dev))
else:
if len(pci_manager.get_instance_pci_devs(instance)) > 0:
raise exception.PciDeviceUnsupportedHypervisor(
type=CONF.libvirt.virt_type)
watchdog_action = flavor.extra_specs.get('hw_watchdog_action',
'disabled')
if (image_meta is not None and
image_meta.get('properties', {}).get('hw_watchdog_action')):
watchdog_action = image_meta['properties']['hw_watchdog_action']
# NB(sross): currently only actually supported by KVM/QEmu
if watchdog_action != 'disabled':
if watchdog_actions.is_valid_watchdog_action(watchdog_action):
bark = vconfig.LibvirtConfigGuestWatchdog()
bark.action = watchdog_action
guest.add_device(bark)
else:
raise exception.InvalidWatchdogAction(action=watchdog_action)
return guest
def to_xml(self, context, instance, network_info, disk_info,
image_meta=None, rescue=None,
block_device_info=None, write_to_disk=False):
# We should get image metadata every time for generating xml
if image_meta is None:
(image_service, image_id) = glance.get_remote_image_service(
context, instance['image_ref'])
image_meta = compute_utils.get_image_metadata(
context, image_service, image_id, instance)
# NOTE(danms): Stringifying a NetworkInfo will take a lock. Do
# this ahead of time so that we don't acquire it while also
# holding the logging lock.
network_info_str = str(network_info)
LOG.debug(_('Start to_xml '
'network_info=%(network_info)s '
'disk_info=%(disk_info)s '
                    'image_meta=%(image_meta)s rescue=%(rescue)s '
                    'block_device_info=%(block_device_info)s'),
{'network_info': network_info_str, 'disk_info': disk_info,
'image_meta': image_meta, 'rescue': rescue,
'block_device_info': block_device_info},
instance=instance)
conf = self.get_guest_config(instance, network_info, image_meta,
disk_info, rescue, block_device_info)
xml = conf.to_xml()
if write_to_disk:
instance_dir = libvirt_utils.get_instance_path(instance)
xml_path = os.path.join(instance_dir, 'libvirt.xml')
libvirt_utils.write_to_file(xml_path, xml)
LOG.debug(_('End to_xml xml=%(xml)s'),
{'xml': xml}, instance=instance)
return xml
def _lookup_by_id(self, instance_id):
"""Retrieve libvirt domain object given an instance id.
        All libvirt errors should be handled in this method, and relevant
        nova exceptions should be raised in response.
"""
try:
return self._conn.lookupByID(instance_id)
except libvirt.libvirtError as ex:
error_code = ex.get_error_code()
if error_code == libvirt.VIR_ERR_NO_DOMAIN:
raise exception.InstanceNotFound(instance_id=instance_id)
msg = (_("Error from libvirt while looking up %(instance_id)s: "
"[Error Code %(error_code)s] %(ex)s")
% {'instance_id': instance_id,
'error_code': error_code,
'ex': ex})
raise exception.NovaException(msg)
def _lookup_by_name(self, instance_name):
"""Retrieve libvirt domain object given an instance name.
        All libvirt errors should be handled in this method, and relevant
        nova exceptions should be raised in response.
"""
try:
return self._conn.lookupByName(instance_name)
except libvirt.libvirtError as ex:
error_code = ex.get_error_code()
if error_code == libvirt.VIR_ERR_NO_DOMAIN:
raise exception.InstanceNotFound(instance_id=instance_name)
msg = (_('Error from libvirt while looking up %(instance_name)s: '
'[Error Code %(error_code)s] %(ex)s') %
{'instance_name': instance_name,
'error_code': error_code,
'ex': ex})
raise exception.NovaException(msg)
def get_info(self, instance):
"""Retrieve information from libvirt for a specific instance name.
If a libvirt error is encountered during lookup, we might raise a
NotFound exception or Error exception depending on how severe the
libvirt error is.
"""
virt_dom = self._lookup_by_name(instance['name'])
(state, max_mem, mem, num_cpu, cpu_time) = virt_dom.info()
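        # dom.info() returns a 5-tuple; 'state' is a raw libvirt domain
        # state that LIBVIRT_POWER_STATE maps onto nova power_state values.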
return {'state': LIBVIRT_POWER_STATE[state],
'max_mem': max_mem,
'mem': mem,
'num_cpu': num_cpu,
'cpu_time': cpu_time,
'id': virt_dom.ID()}
def _create_domain(self, xml=None, domain=None,
instance=None, launch_flags=0, power_on=True):
"""Create a domain.
Either domain or xml must be passed in. If both are passed, then
the domain definition is overwritten from the xml.
"""
inst_path = None
if instance:
inst_path = libvirt_utils.get_instance_path(instance)
if CONF.libvirt.virt_type == 'lxc':
if not inst_path:
inst_path = None
container_dir = os.path.join(inst_path, 'rootfs')
fileutils.ensure_tree(container_dir)
image = self.image_backend.image(instance, 'disk')
container_root_device = disk.setup_container(image.path,
container_dir=container_dir,
use_cow=CONF.use_cow_images)
            # NOTE(GuanQiang): save container root device name here, used for
# detaching the linked image device when deleting
# the lxc instance.
if container_root_device:
self.virtapi.instance_update(
nova_context.get_admin_context(), instance['uuid'],
{'root_device_name': container_root_device})
if xml:
try:
domain = self._conn.defineXML(xml)
except Exception as e:
LOG.error(_("An error occurred while trying to define a domain"
" with xml: %s") % xml)
raise e
if power_on:
try:
domain.createWithFlags(launch_flags)
except Exception as e:
with excutils.save_and_reraise_exception():
LOG.error(_("An error occurred while trying to launch a "
"defined domain with xml: %s") %
domain.XMLDesc(0))
if not utils.is_neutron():
try:
self._enable_hairpin(domain.XMLDesc(0))
except Exception:
with excutils.save_and_reraise_exception():
LOG.error(_("An error occurred while enabling hairpin "
"mode on domain with xml: %s")
% domain.XMLDesc(0))
# NOTE(uni): Now the container is running with its own private mount
# namespace and so there is no need to keep the container rootfs
# mounted in the host namespace
if CONF.libvirt.virt_type == 'lxc':
state = self.get_info(instance)['state']
container_dir = os.path.join(inst_path, 'rootfs')
if state == power_state.RUNNING:
disk.clean_lxc_namespace(container_dir=container_dir)
else:
disk.teardown_container(container_dir=container_dir)
return domain
def _neutron_failed_callback(self, event_name, instance):
LOG.error(_('Neutron Reported failure on event '
'%(event)s for instance %(uuid)s'),
{'event': event_name, 'uuid': instance.uuid})
if CONF.vif_plugging_is_fatal:
raise exception.NovaException()
def _get_neutron_events(self, network_info):
# NOTE(danms): We need to collect any VIFs that are currently
# down that we expect a down->up event for. Anything that is
# already up will not undergo that transition, and for
# anything that might be stale (cache-wise) assume it's
# already up so we don't block on it.
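        # The result is a list of (event_name, vif_id) tuples, e.g.
        # [('network-vif-plugged', '6aa4d63b-...')] (id illustrative).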
return [('network-vif-plugged', vif['id'])
for vif in network_info if vif.get('active', True) is False]
@staticmethod
def _conn_supports_start_paused():
return CONF.libvirt.virt_type in ('kvm', 'qemu')
def _create_domain_and_network(self, context, xml, instance, network_info,
block_device_info=None, power_on=True,
reboot=False, vifs_already_plugged=False):
"""Do required network setup and create domain."""
block_device_mapping = driver.block_device_info_get_mapping(
block_device_info)
for vol in block_device_mapping:
connection_info = vol['connection_info']
disk_info = blockinfo.get_info_from_bdm(
CONF.libvirt.virt_type, vol)
conf = self.volume_driver_method('connect_volume',
connection_info,
disk_info)
# cache device_path in connection_info -- required by encryptors
if (not reboot and 'data' in connection_info and
'volume_id' in connection_info['data']):
connection_info['data']['device_path'] = conf.source_path
vol['connection_info'] = connection_info
vol.save(context)
volume_id = connection_info['data']['volume_id']
encryption = encryptors.get_encryption_metadata(
context, self._volume_api, volume_id, connection_info)
if encryption:
encryptor = self._get_volume_encryptor(connection_info,
encryption)
encryptor.attach_volume(context, **encryption)
timeout = CONF.vif_plugging_timeout
if (self._conn_supports_start_paused() and
utils.is_neutron() and not
vifs_already_plugged and timeout):
events = self._get_neutron_events(network_info)
else:
events = []
        launch_flags = events and libvirt.VIR_DOMAIN_START_PAUSED or 0
        # NOTE: the domain may not have been created yet if an error occurs
        # before _create_domain() runs, so start from None and guard the
        # destroy() calls in the error paths below.
        domain = None
try:
with self.virtapi.wait_for_instance_event(
instance, events, deadline=timeout,
error_callback=self._neutron_failed_callback):
self.plug_vifs(instance, network_info)
self.firewall_driver.setup_basic_filtering(instance,
network_info)
self.firewall_driver.prepare_instance_filter(instance,
network_info)
domain = self._create_domain(
xml, instance=instance,
launch_flags=launch_flags,
power_on=power_on)
self.firewall_driver.apply_instance_filter(instance,
network_info)
        except exception.NovaException:
            # Neutron reported failure and we didn't swallow it, so
            # bail here
            if domain is not None:
                domain.destroy()
self.cleanup(context, instance, network_info=network_info,
block_device_info=block_device_info)
raise exception.VirtualInterfaceCreateException()
except eventlet.timeout.Timeout:
# We never heard from Neutron
LOG.warn(_('Timeout waiting for vif plugging callback for '
'instance %(uuid)s'), {'uuid': instance['uuid']})
            if CONF.vif_plugging_is_fatal:
                if domain is not None:
                    domain.destroy()
self.cleanup(context, instance, network_info=network_info,
block_device_info=block_device_info)
raise exception.VirtualInterfaceCreateException()
# Resume only if domain has been paused
if launch_flags & libvirt.VIR_DOMAIN_START_PAUSED:
domain.resume()
return domain
def get_all_block_devices(self):
"""Return all block devices in use on this node."""
devices = []
for dom_id in self.list_instance_ids():
try:
domain = self._lookup_by_id(dom_id)
doc = etree.fromstring(domain.XMLDesc(0))
except exception.InstanceNotFound:
LOG.info(_("libvirt can't find a domain with id: %s") % dom_id)
continue
except Exception:
continue
ret = doc.findall('./devices/disk')
for node in ret:
if node.get('type') != 'block':
continue
for child in node.getchildren():
if child.tag == 'source':
devices.append(child.get('dev'))
return devices
def get_disks(self, instance_name):
"""Note that this function takes an instance name.
Returns a list of all block devices for this domain.
"""
domain = self._lookup_by_name(instance_name)
xml = domain.XMLDesc(0)
try:
doc = etree.fromstring(xml)
except Exception:
return []
return filter(bool,
[target.get("dev")
for target in doc.findall('devices/disk/target')])
def get_interfaces(self, xml):
"""Note that this function takes a domain xml.
Returns a list of all network interfaces for this instance.
"""
doc = None
try:
doc = etree.fromstring(xml)
except Exception:
return []
interfaces = []
ret = doc.findall('./devices/interface')
for node in ret:
devdst = None
for child in list(node):
if child.tag == 'target':
devdst = child.attrib['dev']
if devdst is None:
continue
interfaces.append(devdst)
return interfaces
def get_vcpu_total(self):
"""Get available vcpu number of physical computer.
:returns: the number of cpu core instances can be used.
"""
if self._vcpu_total != 0:
return self._vcpu_total
try:
total_pcpus = self._conn.getInfo()[2]
except libvirt.libvirtError:
LOG.warn(_("Cannot get the number of cpu, because this "
"function is not implemented for this platform. "))
return 0
if CONF.vcpu_pin_set is None:
self._vcpu_total = total_pcpus
return self._vcpu_total
available_ids = cpu.get_cpuset_ids()
if available_ids[-1] >= total_pcpus:
raise exception.Invalid(_("Invalid vcpu_pin_set config, "
"out of hypervisor cpu range."))
self._vcpu_total = len(available_ids)
return self._vcpu_total
def get_memory_mb_total(self):
"""Get the total memory size(MB) of physical computer.
:returns: the total amount of memory(MB).
"""
return self._conn.getInfo()[1]
@staticmethod
def get_local_gb_info():
"""Get local storage info of the compute node in GB.
:returns: A dict containing:
:total: How big the overall usable filesystem is (in gigabytes)
:free: How much space is free (in gigabytes)
:used: How much space is used (in gigabytes)
"""
if CONF.libvirt.images_type == 'lvm':
info = libvirt_utils.get_volume_group_info(
CONF.libvirt.images_volume_group)
else:
info = libvirt_utils.get_fs_info(CONF.instances_path)
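        # the helpers above report sizes in bytes; convert each value to GiB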
for (k, v) in info.iteritems():
info[k] = v / units.Gi
return info
def get_vcpu_used(self):
"""Get vcpu usage number of physical computer.
:returns: The total number of vcpu(s) that are currently being used.
"""
total = 0
if CONF.libvirt.virt_type == 'lxc':
return total + 1
dom_ids = self.list_instance_ids()
for dom_id in dom_ids:
try:
dom = self._lookup_by_id(dom_id)
try:
vcpus = dom.vcpus()
except libvirt.libvirtError as e:
LOG.warn(_("couldn't obtain the vpu count from domain id:"
" %(id)s, exception: %(ex)s") %
{"id": dom_id, "ex": e})
else:
if vcpus is not None and len(vcpus) > 1:
total += len(vcpus[1])
except exception.InstanceNotFound:
LOG.info(_("libvirt can't find a domain with id: %s") % dom_id)
continue
            # NOTE(gtt116): yield so that other tasks get a chance to run.
greenthread.sleep(0)
return total
def get_memory_mb_used(self):
"""Get the free memory size(MB) of physical computer.
:returns: the total usage of memory(MB).
"""
if sys.platform.upper() not in ['LINUX2', 'LINUX3']:
return 0
m = open('/proc/meminfo').read().split()
idx1 = m.index('MemFree:')
idx2 = m.index('Buffers:')
idx3 = m.index('Cached:')
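        # /proc/meminfo reports sizes in kB, e.g. ['MemTotal:', '16344236',
        # 'kB', 'MemFree:', ...] after split() (values illustrative), so
        # m[idx + 1] is a kB figure.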
if CONF.libvirt.virt_type == 'xen':
used = 0
for domain_id in self.list_instance_ids():
try:
dom_mem = int(self._lookup_by_id(domain_id).info()[2])
except exception.InstanceNotFound:
LOG.info(_("libvirt can't find a domain with id: %s")
% domain_id)
continue
# skip dom0
if domain_id != 0:
used += dom_mem
else:
                    # the memory reported by dom0 is greater than what
                    # is actually being used
used += (dom_mem -
(int(m[idx1 + 1]) +
int(m[idx2 + 1]) +
int(m[idx3 + 1])))
# Convert it to MB
return used / units.Ki
else:
avail = (int(m[idx1 + 1]) + int(m[idx2 + 1]) + int(m[idx3 + 1]))
# Convert it to MB
return self.get_memory_mb_total() - avail / units.Ki
def get_hypervisor_type(self):
"""Get hypervisor type.
:returns: hypervisor type (ex. qemu)
"""
return self._conn.getType()
def get_hypervisor_version(self):
"""Get hypervisor version.
:returns: hypervisor version (ex. 12003)
"""
# NOTE(justinsb): getVersion moved between libvirt versions
        # Trying to be compatible with older versions is a lost cause
# But ... we can at least give the user a nice message
method = getattr(self._conn, 'getVersion', None)
if method is None:
raise exception.NovaException(_("libvirt version is too old"
" (does not support getVersion)"))
# NOTE(justinsb): If we wanted to get the version, we could:
# method = getattr(libvirt, 'getVersion', None)
# NOTE(justinsb): This would then rely on a proper version check
return method()
def get_hypervisor_hostname(self):
"""Returns the hostname of the hypervisor."""
hostname = self._conn.getHostname()
if not hasattr(self, '_hypervisor_hostname'):
self._hypervisor_hostname = hostname
elif hostname != self._hypervisor_hostname:
LOG.error(_('Hostname has changed from %(old)s '
                        'to %(new)s. A restart is required for this to '
                        'take effect.'
) % {'old': self._hypervisor_hostname,
'new': hostname})
return self._hypervisor_hostname
def get_instance_capabilities(self):
"""Get hypervisor instance capabilities
Returns a list of tuples that describe instances the
hypervisor is capable of hosting. Each tuple consists
of the triplet (arch, hypervisor_type, vm_mode).
:returns: List of tuples describing instance capabilities
"""
caps = self.get_host_capabilities()
instance_caps = list()
for g in caps.guests:
for dt in g.domtype:
instance_cap = (g.arch, dt, g.ostype)
instance_caps.append(instance_cap)
return instance_caps
def get_cpu_info(self):
"""Get cpuinfo information.
        Obtains cpu features from virConnect.getCapabilities and returns
        them as a json string.
:return: see above description
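        For example (illustrative)::
            {"arch": "x86_64", "model": "SandyBridge", "vendor": "Intel",
             "topology": {"sockets": 1, "cores": 4, "threads": 2},
             "features": ["pge", "clflush"]}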
"""
caps = self.get_host_capabilities()
cpu_info = dict()
cpu_info['arch'] = caps.host.cpu.arch
cpu_info['model'] = caps.host.cpu.model
cpu_info['vendor'] = caps.host.cpu.vendor
topology = dict()
topology['sockets'] = caps.host.cpu.sockets
topology['cores'] = caps.host.cpu.cores
topology['threads'] = caps.host.cpu.threads
cpu_info['topology'] = topology
features = list()
for f in caps.host.cpu.features:
features.append(f.name)
cpu_info['features'] = features
# TODO(berrange): why do we bother converting the
# libvirt capabilities XML into a special JSON format ?
# The data format is different across all the drivers
# so we could just return the raw capabilities XML
# which 'compare_cpu' could use directly
#
# That said, arch_filter.py now seems to rely on
# the libvirt drivers format which suggests this
# data format needs to be standardized across drivers
return jsonutils.dumps(cpu_info)
def _get_pcidev_info(self, devname):
"""Returns a dict of PCI device."""
def _get_device_type(cfgdev):
"""Get a PCI device's device type.
An assignable PCI device can be a normal PCI device,
a SR-IOV Physical Function (PF), or a SR-IOV Virtual
Function (VF). Only normal PCI devices or SR-IOV VFs
            are assignable, while SR-IOV PFs are always owned by the
            hypervisor.
            Note that a PCI device with SR-IOV capability that is not
            enabled is reported as a normal PCI device.
"""
for fun_cap in cfgdev.pci_capability.fun_capability:
if len(fun_cap.device_addrs) != 0:
if fun_cap.type == 'virt_functions':
return {'dev_type': 'type-PF'}
if fun_cap.type == 'phys_function':
phys_address = "%s:%s:%s.%s" % (
fun_cap.device_addrs[0][0].replace("0x", ''),
fun_cap.device_addrs[0][1].replace("0x", ''),
fun_cap.device_addrs[0][2].replace("0x", ''),
fun_cap.device_addrs[0][3].replace("0x", ''))
return {'dev_type': 'type-VF',
'phys_function': phys_address}
return {'dev_type': 'type-PCI'}
virtdev = self._conn.nodeDeviceLookupByName(devname)
xmlstr = virtdev.XMLDesc(0)
cfgdev = vconfig.LibvirtConfigNodeDevice()
cfgdev.parse_str(xmlstr)
address = "%04x:%02x:%02x.%1x" % (
cfgdev.pci_capability.domain,
cfgdev.pci_capability.bus,
cfgdev.pci_capability.slot,
cfgdev.pci_capability.function)
device = {
"dev_id": cfgdev.name,
"address": address,
"product_id": cfgdev.pci_capability.product_id[2:6],
"vendor_id": cfgdev.pci_capability.vendor_id[2:6],
}
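        # Illustrative example of the dict built above (values made up):
        # {'dev_id': 'pci_0000_04_00_3', 'address': '0000:04:00.3',
        #  'product_id': '1521', 'vendor_id': '8086'}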
        # 'label' is required by the database model
device['label'] = 'label_%(vendor_id)s_%(product_id)s' % device
device.update(_get_device_type(cfgdev))
return device
def _pci_device_assignable(self, device):
if device['dev_type'] == 'type-PF':
return False
return self.dev_filter.device_assignable(device)
def get_pci_passthrough_devices(self):
"""Get host pci devices information.
        Obtains pci device information from libvirt and returns it as
        a json string.
Each device information is a dictionary, with mandatory keys
of 'address', 'vendor_id', 'product_id', 'dev_type', 'dev_id',
'label' and other optional device specific information.
        Refer to objects/pci_device.py for more details about these keys.
:returns: a list of the assignable pci devices information
"""
pci_info = []
dev_names = self._conn.listDevices('pci', 0) or []
for name in dev_names:
pci_dev = self._get_pcidev_info(name)
if self._pci_device_assignable(pci_dev):
pci_info.append(pci_dev)
return jsonutils.dumps(pci_info)
def get_all_volume_usage(self, context, compute_host_bdms):
"""Return usage info for volumes attached to vms on
a given host.
"""
vol_usage = []
for instance_bdms in compute_host_bdms:
instance = instance_bdms['instance']
for bdm in instance_bdms['instance_bdms']:
vol_stats = []
mountpoint = bdm['device_name']
if mountpoint.startswith('/dev/'):
mountpoint = mountpoint[5:]
volume_id = bdm['volume_id']
LOG.debug(_("Trying to get stats for the volume %s"),
volume_id)
vol_stats = self.block_stats(instance['name'], mountpoint)
if vol_stats:
stats = dict(volume=volume_id,
instance=instance,
rd_req=vol_stats[0],
rd_bytes=vol_stats[1],
wr_req=vol_stats[2],
wr_bytes=vol_stats[3],
flush_operations=vol_stats[4])
LOG.debug(
_("Got volume usage stats for the volume=%(volume)s,"
" rd_req=%(rd_req)d, rd_bytes=%(rd_bytes)d, "
"wr_req=%(wr_req)d, wr_bytes=%(wr_bytes)d"),
stats, instance=instance)
vol_usage.append(stats)
return vol_usage
def block_stats(self, instance_name, disk):
"""Note that this function takes an instance name."""
try:
domain = self._lookup_by_name(instance_name)
return domain.blockStats(disk)
except libvirt.libvirtError as e:
errcode = e.get_error_code()
LOG.info(_('Getting block stats failed, device might have '
'been detached. Instance=%(instance_name)s '
'Disk=%(disk)s Code=%(errcode)s Error=%(e)s'),
{'instance_name': instance_name, 'disk': disk,
'errcode': errcode, 'e': e})
except exception.InstanceNotFound:
LOG.info(_('Could not find domain in libvirt for instance %s. '
'Cannot get block stats for device'), instance_name)
def interface_stats(self, instance_name, interface):
"""Note that this function takes an instance name."""
domain = self._lookup_by_name(instance_name)
return domain.interfaceStats(interface)
def get_console_pool_info(self, console_type):
        # TODO(mdragon): console proxy should be implemented for libvirt,
# in case someone wants to use it with kvm or
# such. For now return fake data.
return {'address': '127.0.0.1',
'username': 'fakeuser',
'password': 'fakepassword'}
def refresh_security_group_rules(self, security_group_id):
self.firewall_driver.refresh_security_group_rules(security_group_id)
def refresh_security_group_members(self, security_group_id):
self.firewall_driver.refresh_security_group_members(security_group_id)
def refresh_instance_security_rules(self, instance):
self.firewall_driver.refresh_instance_security_rules(instance)
def refresh_provider_fw_rules(self):
self.firewall_driver.refresh_provider_fw_rules()
def get_available_resource(self, nodename):
"""Retrieve resource information.
This method is called when nova-compute launches, and
as part of a periodic task that records the results in the DB.
:param nodename: will be put in PCI device
:returns: dictionary containing resource info
"""
# Temporary: convert supported_instances into a string, while keeping
# the RPC version as JSON. Can be changed when RPC broadcast is removed
stats = self.get_host_stats(refresh=True)
stats['supported_instances'] = jsonutils.dumps(
stats['supported_instances'])
return stats
def check_instance_shared_storage_local(self, context, instance):
dirpath = libvirt_utils.get_instance_path(instance)
if not os.path.exists(dirpath):
return None
fd, tmp_file = tempfile.mkstemp(dir=dirpath)
LOG.debug(_("Creating tmpfile %s to verify with other "
"compute node that the instance is on "
"the same shared storage."),
tmp_file, instance=instance)
os.close(fd)
return {"filename": tmp_file}
def check_instance_shared_storage_remote(self, context, data):
return os.path.exists(data['filename'])
def check_instance_shared_storage_cleanup(self, context, data):
fileutils.delete_if_exists(data["filename"])
def check_can_live_migrate_destination(self, context, instance,
src_compute_info, dst_compute_info,
block_migration=False,
disk_over_commit=False):
"""Check if it is possible to execute live migration.
This runs checks on the destination host, and then calls
back to the source host to check the results.
:param context: security context
:param instance: nova.db.sqlalchemy.models.Instance
:param block_migration: if true, prepare for block migration
:param disk_over_commit: if true, allow disk over commit
:returns: a dict containing:
:filename: name of the tmpfile under CONF.instances_path
:block_migration: whether this is block migration
:disk_over_commit: disk-over-commit factor on dest host
:disk_available_mb: available disk space on dest host
"""
disk_available_mb = None
if block_migration:
disk_available_gb = dst_compute_info['disk_available_least']
disk_available_mb = \
(disk_available_gb * units.Ki) - CONF.reserved_host_disk_mb
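            # disk_available_least is reported in GB; units.Ki (1024)
            # converts it to MB to match CONF.reserved_host_disk_mb.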
# Compare CPU
source_cpu_info = src_compute_info['cpu_info']
self._compare_cpu(source_cpu_info)
# Create file on storage, to be checked on source host
filename = self._create_shared_storage_test_file()
return {"filename": filename,
"block_migration": block_migration,
"disk_over_commit": disk_over_commit,
"disk_available_mb": disk_available_mb}
def check_can_live_migrate_destination_cleanup(self, context,
dest_check_data):
"""Do required cleanup on dest host after check_can_live_migrate calls
:param context: security context
"""
filename = dest_check_data["filename"]
self._cleanup_shared_storage_test_file(filename)
def check_can_live_migrate_source(self, context, instance,
dest_check_data):
"""Check if it is possible to execute live migration.
This checks if the live migration can succeed, based on the
results from check_can_live_migrate_destination.
:param context: security context
:param instance: nova.db.sqlalchemy.models.Instance
:param dest_check_data: result of check_can_live_migrate_destination
:returns: a dict containing migration info
"""
# Checking shared storage connectivity
        # if block migration, instances_path should not be on shared storage.
source = CONF.host
filename = dest_check_data["filename"]
block_migration = dest_check_data["block_migration"]
is_volume_backed = dest_check_data.get('is_volume_backed', False)
has_local_disks = bool(
jsonutils.loads(self.get_instance_disk_info(instance['name'])))
shared = self._check_shared_storage_test_file(filename)
if block_migration:
if shared:
reason = _("Block migration can not be used "
"with shared storage.")
raise exception.InvalidLocalStorage(reason=reason, path=source)
self._assert_dest_node_has_enough_disk(context, instance,
dest_check_data['disk_available_mb'],
dest_check_data['disk_over_commit'])
elif not shared and (not is_volume_backed or has_local_disks):
reason = _("Live migration can not be used "
"without shared storage.")
raise exception.InvalidSharedStorage(reason=reason, path=source)
dest_check_data.update({"is_shared_storage": shared})
# NOTE(mikal): include the instance directory name here because it
# doesn't yet exist on the destination but we want to force that
# same name to be used
instance_path = libvirt_utils.get_instance_path(instance,
relative=True)
dest_check_data['instance_relative_path'] = instance_path
return dest_check_data
def _assert_dest_node_has_enough_disk(self, context, instance,
available_mb, disk_over_commit):
"""Checks if destination has enough disk for block migration."""
        # Libvirt supports the qcow2 disk format, which is usually
        # compressed on compute nodes.
        # The real (compressed) disk image may be enlarged up to the
        # "virtual disk size", which is specified as the maximum disk size.
        # (See qemu-img info path-to-disk)
        # The scheduler recognizes that the destination host still has
        # enough disk space if real disk size < available disk size when
        # disk_over_commit is True, otherwise if
        # virtual disk size < available disk size.
available = 0
if available_mb:
available = available_mb * units.Mi
ret = self.get_instance_disk_info(instance['name'])
disk_infos = jsonutils.loads(ret)
necessary = 0
if disk_over_commit:
for info in disk_infos:
necessary += int(info['disk_size'])
else:
for info in disk_infos:
necessary += int(info['virt_disk_size'])
# Check that available disk > necessary disk
if (available - necessary) < 0:
reason = (_('Unable to migrate %(instance_uuid)s: '
                        'Disk of instance is too large (available'
                        ' on destination host: %(available)s '
                        '< need: %(necessary)s)') %
{'instance_uuid': instance['uuid'],
'available': available,
'necessary': necessary})
raise exception.MigrationPreCheckError(reason=reason)
def _compare_cpu(self, cpu_info):
"""Checks the host cpu is compatible to a cpu given by xml.
"xml" must be a part of libvirt.openAuth(...).getCapabilities().
return values follows by virCPUCompareResult.
if 0 > return value, do live migration.
'http://libvirt.org/html/libvirt-libvirt.html#virCPUCompareResult'
:param cpu_info: json string that shows cpu feature(see get_cpu_info())
:returns:
None. if given cpu info is not compatible to this server,
raise exception.
"""
# NOTE(berendt): virConnectCompareCPU not working for Xen
if CONF.libvirt.virt_type == 'xen':
return 1
info = jsonutils.loads(cpu_info)
LOG.info(_('Instance launched has CPU info:\n%s') % cpu_info)
cpu = vconfig.LibvirtConfigCPU()
cpu.arch = info['arch']
cpu.model = info['model']
cpu.vendor = info['vendor']
cpu.sockets = info['topology']['sockets']
cpu.cores = info['topology']['cores']
cpu.threads = info['topology']['threads']
for f in info['features']:
cpu.add_feature(vconfig.LibvirtConfigCPUFeature(f))
u = "http://libvirt.org/html/libvirt-libvirt.html#virCPUCompareResult"
m = _("CPU doesn't have compatibility.\n\n%(ret)s\n\nRefer to %(u)s")
        # if an unknown character exists in the xml, libvirt complains
try:
ret = self._conn.compareCPU(cpu.to_xml(), 0)
except libvirt.libvirtError as e:
with excutils.save_and_reraise_exception():
ret = unicode(e)
LOG.error(m, {'ret': ret, 'u': u})
if ret <= 0:
LOG.error(m, {'ret': ret, 'u': u})
raise exception.InvalidCPUInfo(reason=m % {'ret': ret, 'u': u})
def _create_shared_storage_test_file(self):
"""Makes tmpfile under CONF.instances_path."""
dirpath = CONF.instances_path
fd, tmp_file = tempfile.mkstemp(dir=dirpath)
LOG.debug(_("Creating tmpfile %s to notify to other "
"compute nodes that they should mount "
"the same storage.") % tmp_file)
os.close(fd)
return os.path.basename(tmp_file)
def _check_shared_storage_test_file(self, filename):
"""Confirms existence of the tmpfile under CONF.instances_path.
        If the tmpfile cannot be confirmed, returns False.
"""
tmp_file = os.path.join(CONF.instances_path, filename)
if not os.path.exists(tmp_file):
return False
else:
return True
def _cleanup_shared_storage_test_file(self, filename):
"""Removes existence of the tmpfile under CONF.instances_path."""
tmp_file = os.path.join(CONF.instances_path, filename)
os.remove(tmp_file)
def ensure_filtering_rules_for_instance(self, instance, network_info,
time_module=None):
"""Ensure that an instance's filtering rules are enabled.
When migrating an instance, we need the filtering rules to
be configured on the destination host before starting the
migration.
Also, when restarting the compute service, we need to ensure
that filtering rules exist for all running services.
"""
if not time_module:
time_module = greenthread
self.firewall_driver.setup_basic_filtering(instance, network_info)
self.firewall_driver.prepare_instance_filter(instance,
network_info)
# nwfilters may be defined in a separate thread in the case
# of libvirt non-blocking mode, so we wait for completion
timeout_count = range(CONF.live_migration_retry_count)
while timeout_count:
if self.firewall_driver.instance_filter_exists(instance,
network_info):
break
timeout_count.pop()
if len(timeout_count) == 0:
msg = _('The firewall filter for %s does not exist')
raise exception.NovaException(msg % instance.name)
time_module.sleep(1)
def filter_defer_apply_on(self):
self.firewall_driver.filter_defer_apply_on()
def filter_defer_apply_off(self):
self.firewall_driver.filter_defer_apply_off()
def live_migration(self, context, instance, dest,
post_method, recover_method, block_migration=False,
migrate_data=None):
"""Spawning live_migration operation for distributing high-load.
:param context: security context
:param instance:
nova.db.sqlalchemy.models.Instance object
instance object that is migrated.
:param dest: destination host
:param post_method:
post operation method.
expected nova.compute.manager.post_live_migration.
:param recover_method:
recovery method when any exception occurs.
expected nova.compute.manager.recover_live_migration.
:param block_migration: if true, do block migration.
:param migrate_data: implementation specific params
"""
greenthread.spawn(self._live_migration, context, instance, dest,
post_method, recover_method, block_migration,
migrate_data)
def _live_migration(self, context, instance, dest, post_method,
recover_method, block_migration=False,
migrate_data=None):
"""Do live migration.
:param context: security context
:param instance:
nova.db.sqlalchemy.models.Instance object
instance object that is migrated.
:param dest: destination host
:param post_method:
post operation method.
expected nova.compute.manager.post_live_migration.
:param recover_method:
recovery method when any exception occurs.
expected nova.compute.manager.recover_live_migration.
:param block_migration: if true, do block migration.
:param migrate_data: implementation specific params
"""
# Do live migration.
try:
if block_migration:
flaglist = CONF.libvirt.block_migration_flag.split(',')
else:
flaglist = CONF.libvirt.live_migration_flag.split(',')
flagvals = [getattr(libvirt, x.strip()) for x in flaglist]
logical_sum = reduce(lambda x, y: x | y, flagvals)
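            # The config option is a comma separated list of libvirt flag
            # names, e.g. 'VIR_MIGRATE_LIVE,VIR_MIGRATE_PEER2PEER'
            # (illustrative); they are OR-ed into one bitmask for
            # migrateToURI below.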
dom = self._lookup_by_name(instance["name"])
dom.migrateToURI(CONF.libvirt.live_migration_uri % dest,
logical_sum,
None,
CONF.libvirt.live_migration_bandwidth)
except Exception as e:
with excutils.save_and_reraise_exception():
LOG.error(_("Live Migration failure: %s"), e,
instance=instance)
recover_method(context, instance, dest, block_migration)
# Waiting for completion of live_migration.
timer = loopingcall.FixedIntervalLoopingCall(f=None)
def wait_for_live_migration():
"""waiting for live migration completion."""
try:
self.get_info(instance)['state']
except exception.InstanceNotFound:
timer.stop()
post_method(context, instance, dest, block_migration,
migrate_data)
timer.f = wait_for_live_migration
timer.start(interval=0.5).wait()
def _fetch_instance_kernel_ramdisk(self, context, instance):
"""Download kernel and ramdisk for instance in instance directory."""
instance_dir = libvirt_utils.get_instance_path(instance)
if instance['kernel_id']:
libvirt_utils.fetch_image(context,
os.path.join(instance_dir, 'kernel'),
instance['kernel_id'],
instance['user_id'],
instance['project_id'])
if instance['ramdisk_id']:
libvirt_utils.fetch_image(context,
os.path.join(instance_dir,
'ramdisk'),
instance['ramdisk_id'],
instance['user_id'],
instance['project_id'])
def rollback_live_migration_at_destination(self, context, instance,
network_info,
block_device_info):
"""Clean up destination node after a failed live migration."""
self.destroy(context, instance, network_info, block_device_info)
def pre_live_migration(self, context, instance, block_device_info,
network_info, disk_info, migrate_data=None):
"""Preparation live migration."""
# Steps for volume backed instance live migration w/o shared storage.
is_shared_storage = True
is_volume_backed = False
is_block_migration = True
instance_relative_path = None
if migrate_data:
is_shared_storage = migrate_data.get('is_shared_storage', True)
is_volume_backed = migrate_data.get('is_volume_backed', False)
is_block_migration = migrate_data.get('block_migration', True)
instance_relative_path = migrate_data.get('instance_relative_path')
if not is_shared_storage:
# NOTE(mikal): block migration of instances using config drive is
# not supported because of a bug in libvirt (read only devices
# are not copied by libvirt). See bug/1246201
if configdrive.required_by(instance):
raise exception.NoBlockMigrationForConfigDriveInLibVirt()
# NOTE(mikal): this doesn't use libvirt_utils.get_instance_path
# because we are ensuring that the same instance directory name
# is used as was at the source
if instance_relative_path:
instance_dir = os.path.join(CONF.instances_path,
instance_relative_path)
else:
instance_dir = libvirt_utils.get_instance_path(instance)
if os.path.exists(instance_dir):
raise exception.DestinationDiskExists(path=instance_dir)
os.mkdir(instance_dir)
# Ensure images and backing files are present.
self._create_images_and_backing(context, instance, instance_dir,
disk_info)
if is_volume_backed and not (is_block_migration or is_shared_storage):
# Touch the console.log file, required by libvirt.
console_file = self._get_console_log_path(instance)
libvirt_utils.file_open(console_file, 'a').close()
        # if the image has a kernel and ramdisk, just download
        # them the normal way.
self._fetch_instance_kernel_ramdisk(context, instance)
# Establishing connection to volume server.
block_device_mapping = driver.block_device_info_get_mapping(
block_device_info)
for vol in block_device_mapping:
connection_info = vol['connection_info']
disk_info = blockinfo.get_info_from_bdm(
CONF.libvirt.virt_type, vol)
self.volume_driver_method('connect_volume',
connection_info,
disk_info)
# We call plug_vifs before the compute manager calls
# ensure_filtering_rules_for_instance, to ensure bridge is set up
        # Retrying is necessary because requests arrive continuously, and
        # concurrent requests to iptables cause it to complain.
max_retry = CONF.live_migration_retry_count
for cnt in range(max_retry):
try:
self.plug_vifs(instance, network_info)
break
except processutils.ProcessExecutionError:
if cnt == max_retry - 1:
raise
else:
LOG.warn(_('plug_vifs() failed %(cnt)d. Retry up to '
'%(max_retry)d.'),
{'cnt': cnt,
'max_retry': max_retry},
instance=instance)
greenthread.sleep(1)
def _create_images_and_backing(self, context, instance, instance_dir,
disk_info_json):
""":param context: security context
:param instance:
nova.db.sqlalchemy.models.Instance object
instance object that is migrated.
:param instance_dir:
instance path to use, calculated externally to handle block
migrating an instance with an old style instance path
:param disk_info_json:
json strings specified in get_instance_disk_info
"""
if not disk_info_json:
disk_info = []
else:
disk_info = jsonutils.loads(disk_info_json)
for info in disk_info:
base = os.path.basename(info['path'])
# Get image type and create empty disk image, and
# create backing file in case of qcow2.
instance_disk = os.path.join(instance_dir, base)
if not info['backing_file'] and not os.path.exists(instance_disk):
libvirt_utils.create_image(info['type'], instance_disk,
info['virt_disk_size'])
elif info['backing_file']:
                # Creating the backing file follows the same path as
                # spawning an instance.
cache_name = os.path.basename(info['backing_file'])
image = self.image_backend.image(instance,
instance_disk,
CONF.libvirt.images_type)
if cache_name.startswith('ephemeral'):
image.cache(fetch_func=self._create_ephemeral,
fs_label=cache_name,
os_type=instance["os_type"],
filename=cache_name,
size=info['virt_disk_size'],
ephemeral_size=instance['ephemeral_gb'])
elif cache_name.startswith('swap'):
inst_type = flavors.extract_flavor(instance)
swap_mb = inst_type['swap']
image.cache(fetch_func=self._create_swap,
filename="swap_%s" % swap_mb,
size=swap_mb * units.Mi,
swap_mb=swap_mb)
else:
image.cache(fetch_func=libvirt_utils.fetch_image,
context=context,
filename=cache_name,
image_id=instance['image_ref'],
user_id=instance['user_id'],
project_id=instance['project_id'],
size=info['virt_disk_size'])
        # if the image has a kernel and ramdisk, just download
        # them the normal way.
self._fetch_instance_kernel_ramdisk(context, instance)
def post_live_migration(self, context, instance, block_device_info,
migrate_data=None):
# Disconnect from volume server
block_device_mapping = driver.block_device_info_get_mapping(
block_device_info)
for vol in block_device_mapping:
connection_info = vol['connection_info']
disk_dev = vol['mount_device'].rpartition("/")[2]
self.volume_driver_method('disconnect_volume',
connection_info,
disk_dev)
def post_live_migration_at_destination(self, context,
instance,
network_info,
block_migration,
block_device_info=None):
"""Post operation of live migration at destination host.
:param context: security context
:param instance:
nova.db.sqlalchemy.models.Instance object
instance object that is migrated.
:param network_info: instance network information
:param block_migration: if true, post operation of block_migration.
"""
# Define migrated instance, otherwise, suspend/destroy does not work.
dom_list = self._conn.listDefinedDomains()
if instance["name"] not in dom_list:
# In case of block migration, destination does not have
# libvirt.xml
disk_info = blockinfo.get_disk_info(
CONF.libvirt.virt_type, instance, block_device_info)
xml = self.to_xml(context, instance, network_info, disk_info,
block_device_info=block_device_info,
write_to_disk=True)
self._conn.defineXML(xml)
def get_instance_disk_info(self, instance_name, xml=None,
block_device_info=None):
"""Retrieve information about actual disk sizes of an instance.
:param instance_name:
name of a nova instance as returned by list_instances()
:param xml:
Optional; Domain XML of given libvirt instance.
If omitted, this method attempts to extract it from the
pre-existing definition.
:param block_device_info:
Optional; Can be used to filter out devices which are
actually volumes.
:return:
json strings with below format::
"[{'path':'disk', 'type':'raw',
'virt_disk_size':'10737418240',
'backing_file':'backing_file',
'disk_size':'83886080'},...]"
"""
if xml is None:
try:
virt_dom = self._lookup_by_name(instance_name)
xml = virt_dom.XMLDesc(0)
except libvirt.libvirtError as ex:
error_code = ex.get_error_code()
msg = (_('Error from libvirt while getting description of '
'%(instance_name)s: [Error Code %(error_code)s] '
'%(ex)s') %
{'instance_name': instance_name,
'error_code': error_code,
'ex': ex})
LOG.warn(msg)
raise exception.InstanceNotFound(instance_id=instance_name)
block_device_mapping = driver.block_device_info_get_mapping(
block_device_info)
volume_devices = set()
for vol in block_device_mapping:
disk_dev = vol['mount_device'].rpartition("/")[2]
volume_devices.add(disk_dev)
disk_info = []
doc = etree.fromstring(xml)
disk_nodes = doc.findall('.//devices/disk')
path_nodes = doc.findall('.//devices/disk/source')
driver_nodes = doc.findall('.//devices/disk/driver')
target_nodes = doc.findall('.//devices/disk/target')
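        # NOTE: the four findall() results above are treated as parallel
        # lists; entry N of each is assumed to describe the same <disk>
        # element of the domain XML.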
for cnt, path_node in enumerate(path_nodes):
disk_type = disk_nodes[cnt].get('type')
path = path_node.get('file')
target = target_nodes[cnt].attrib['dev']
if not path:
LOG.debug(_('skipping disk for %s as it does not have a path'),
instance_name)
continue
if disk_type != 'file':
LOG.debug(_('skipping %s since it looks like volume'), path)
continue
if target in volume_devices:
LOG.debug(_('skipping disk %(path)s (%(target)s) as it is a '
'volume'), {'path': path, 'target': target})
continue
# get the real disk size or
# raise a localized error if image is unavailable
dk_size = int(os.path.getsize(path))
disk_type = driver_nodes[cnt].get('type')
if disk_type == "qcow2":
backing_file = libvirt_utils.get_disk_backing_file(path)
virt_size = disk.get_disk_size(path)
over_commit_size = int(virt_size) - dk_size
else:
backing_file = ""
virt_size = dk_size
over_commit_size = 0
disk_info.append({'type': disk_type,
'path': path,
'virt_disk_size': virt_size,
'backing_file': backing_file,
'disk_size': dk_size,
'over_committed_disk_size': over_commit_size})
return jsonutils.dumps(disk_info)
def get_disk_over_committed_size_total(self):
"""Return total over committed disk size for all instances."""
        # Disk size that all instances use: virtual_size - disk_size
instances_name = self.list_instances()
disk_over_committed_size = 0
for i_name in instances_name:
try:
disk_infos = jsonutils.loads(
self.get_instance_disk_info(i_name))
for info in disk_infos:
disk_over_committed_size += int(
info['over_committed_disk_size'])
except OSError as e:
if e.errno == errno.ENOENT:
LOG.warning(_('Periodic task is updating the host stat, '
'it is trying to get disk %(i_name)s, '
'but disk file was removed by concurrent '
'operations such as resize.'),
{'i_name': i_name})
else:
raise
except exception.InstanceNotFound:
# Instance was deleted during the check so ignore it
pass
            # NOTE(gtt116): yield so that other tasks get a chance to run.
greenthread.sleep(0)
return disk_over_committed_size
def unfilter_instance(self, instance, network_info):
"""See comments of same method in firewall_driver."""
self.firewall_driver.unfilter_instance(instance,
network_info=network_info)
def get_host_stats(self, refresh=False):
"""Return the current state of the host.
If 'refresh' is True, run update the stats first.
"""
return self.host_state.get_host_stats(refresh=refresh)
def get_host_cpu_stats(self):
"""Return the current CPU state of the host."""
# Extract node's CPU statistics.
stats = self._conn.getCPUStats(libvirt.VIR_NODE_CPU_STATS_ALL_CPUS, 0)
# getInfo() returns various information about the host node
        # element 3 of the result is the expected CPU frequency (in MHz)
stats["frequency"] = self._conn.getInfo()[3]
return stats
def get_host_uptime(self, host):
"""Returns the result of calling "uptime"."""
        # NOTE(dprince): host seems to be ignored for this call and in
# other compute drivers as well. Perhaps we should remove it?
out, err = utils.execute('env', 'LANG=C', 'uptime')
return out
def manage_image_cache(self, context, all_instances):
"""Manage the local cache of images."""
self.image_cache_manager.update(context, all_instances)
def _cleanup_remote_migration(self, dest, inst_base, inst_base_resize,
shared_storage=False):
"""Used only for cleanup in case migrate_disk_and_power_off fails."""
try:
if os.path.exists(inst_base_resize):
utils.execute('rm', '-rf', inst_base)
utils.execute('mv', inst_base_resize, inst_base)
if not shared_storage:
utils.execute('ssh', dest, 'rm', '-rf', inst_base)
except Exception:
pass
def _is_storage_shared_with(self, dest, inst_base):
# NOTE (rmk): There are two methods of determining whether we are
# on the same filesystem: the source and dest IP are the
# same, or we create a file on the dest system via SSH
# and check whether the source system can also see it.
shared_storage = (dest == self.get_host_ip_addr())
if not shared_storage:
tmp_file = uuid.uuid4().hex + '.tmp'
tmp_path = os.path.join(inst_base, tmp_file)
try:
utils.execute('ssh', dest, 'touch', tmp_path)
if os.path.exists(tmp_path):
shared_storage = True
os.unlink(tmp_path)
else:
utils.execute('ssh', dest, 'rm', tmp_path)
except Exception:
pass
return shared_storage
def migrate_disk_and_power_off(self, context, instance, dest,
flavor, network_info,
block_device_info=None):
LOG.debug(_("Starting migrate_disk_and_power_off"),
instance=instance)
# Checks if the migration needs a disk resize down.
for kind in ('root_gb', 'ephemeral_gb'):
if flavor[kind] < instance[kind]:
reason = _("Unable to resize disk down.")
raise exception.InstanceFaultRollback(
exception.ResizeError(reason=reason))
disk_info_text = self.get_instance_disk_info(instance['name'],
block_device_info=block_device_info)
disk_info = jsonutils.loads(disk_info_text)
# copy disks to destination
        # rename the instance dir to <name>_resize first, to support using
        # shared storage for the instance dir (eg. NFS).
inst_base = libvirt_utils.get_instance_path(instance)
inst_base_resize = inst_base + "_resize"
shared_storage = self._is_storage_shared_with(dest, inst_base)
# try to create the directory on the remote compute node
# if this fails we pass the exception up the stack so we can catch
# failures here earlier
if not shared_storage:
utils.execute('ssh', dest, 'mkdir', '-p', inst_base)
self.power_off(instance)
block_device_mapping = driver.block_device_info_get_mapping(
block_device_info)
for vol in block_device_mapping:
connection_info = vol['connection_info']
disk_dev = vol['mount_device'].rpartition("/")[2]
self.volume_driver_method('disconnect_volume',
connection_info,
disk_dev)
try:
utils.execute('mv', inst_base, inst_base_resize)
# if we are migrating the instance with shared storage then
# create the directory. If it is a remote node the directory
# has already been created
if shared_storage:
dest = None
utils.execute('mkdir', '-p', inst_base)
for info in disk_info:
# assume inst_base == dirname(info['path'])
img_path = info['path']
fname = os.path.basename(img_path)
from_path = os.path.join(inst_base_resize, fname)
if info['type'] == 'qcow2' and info['backing_file']:
tmp_path = from_path + "_rbase"
# merge backing file
utils.execute('qemu-img', 'convert', '-f', 'qcow2',
'-O', 'qcow2', from_path, tmp_path)
if shared_storage:
utils.execute('mv', tmp_path, img_path)
else:
libvirt_utils.copy_image(tmp_path, img_path, host=dest)
utils.execute('rm', '-f', tmp_path)
else: # raw or qcow2 with no backing file
libvirt_utils.copy_image(from_path, img_path, host=dest)
except Exception:
with excutils.save_and_reraise_exception():
self._cleanup_remote_migration(dest, inst_base,
inst_base_resize,
shared_storage)
return disk_info_text
def _wait_for_running(self, instance):
state = self.get_info(instance)['state']
if state == power_state.RUNNING:
LOG.info(_("Instance running successfully."), instance=instance)
raise loopingcall.LoopingCallDone()
def rename_virtualmachine(self, context, instance):
LOG.info('Doesn\'t actually call the rename method')
def finish_migration(self, context, migration, instance, disk_info,
network_info, image_meta, resize_instance,
block_device_info=None, power_on=True):
LOG.debug(_("Starting finish_migration"), instance=instance)
# resize disks. only "disk" and "disk.local" are necessary.
disk_info = jsonutils.loads(disk_info)
for info in disk_info:
fname = os.path.basename(info['path'])
if fname == 'disk':
size = instance['root_gb']
elif fname == 'disk.local':
size = instance['ephemeral_gb']
else:
size = 0
size *= units.Gi
# If we have a non partitioned image that we can extend
# then ensure we're in 'raw' format so we can extend file system.
fmt = info['type']
if (size and fmt == 'qcow2' and
disk.can_resize_image(info['path'], size) and
disk.is_image_partitionless(info['path'], use_cow=True)):
path_raw = info['path'] + '_raw'
utils.execute('qemu-img', 'convert', '-f', 'qcow2',
'-O', 'raw', info['path'], path_raw)
utils.execute('mv', path_raw, info['path'])
fmt = 'raw'
if size:
use_cow = fmt == 'qcow2'
disk.extend(info['path'], size, use_cow=use_cow)
if fmt == 'raw' and CONF.use_cow_images:
# back to qcow2 (no backing_file though) so that snapshot
# will be available
path_qcow = info['path'] + '_qcow'
utils.execute('qemu-img', 'convert', '-f', 'raw',
'-O', 'qcow2', info['path'], path_qcow)
utils.execute('mv', path_qcow, info['path'])
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance,
block_device_info,
image_meta)
        # assume _create_image does nothing if a target file exists.
self._create_image(context, instance,
disk_mapping=disk_info['mapping'],
network_info=network_info,
block_device_info=None, inject_files=False)
xml = self.to_xml(context, instance, network_info, disk_info,
block_device_info=block_device_info,
write_to_disk=True)
self._create_domain_and_network(context, xml, instance, network_info,
block_device_info, power_on)
if power_on:
timer = loopingcall.FixedIntervalLoopingCall(
self._wait_for_running,
instance)
timer.start(interval=0.5).wait()
def _cleanup_failed_migration(self, inst_base):
"""Make sure that a failed migrate doesn't prevent us from rolling
back in a revert.
"""
try:
shutil.rmtree(inst_base)
except OSError as e:
if e.errno != errno.ENOENT:
raise
def finish_revert_migration(self, context, instance, network_info,
block_device_info=None, power_on=True):
LOG.debug(_("Starting finish_revert_migration"),
instance=instance)
inst_base = libvirt_utils.get_instance_path(instance)
inst_base_resize = inst_base + "_resize"
# NOTE(danms): if we're recovering from a failed migration,
# make sure we don't have a left-over same-host base directory
# that would conflict. Also, don't fail on the rename if the
# failure happened early.
if os.path.exists(inst_base_resize):
self._cleanup_failed_migration(inst_base)
utils.execute('mv', inst_base_resize, inst_base)
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance,
block_device_info)
xml = self.to_xml(context, instance, network_info, disk_info,
block_device_info=block_device_info)
self._create_domain_and_network(context, xml, instance, network_info,
block_device_info, power_on)
if power_on:
timer = loopingcall.FixedIntervalLoopingCall(
self._wait_for_running,
instance)
timer.start(interval=0.5).wait()
def confirm_migration(self, migration, instance, network_info):
"""Confirms a resize, destroying the source VM."""
self._cleanup_resize(instance, network_info)
def get_diagnostics(self, instance):
def get_io_devices(xml_doc):
"""get the list of io devices from the xml document."""
result = {"volumes": [], "ifaces": []}
try:
doc = etree.fromstring(xml_doc)
except Exception:
return result
blocks = [('./devices/disk', 'volumes'),
('./devices/interface', 'ifaces')]
for block, key in blocks:
section = doc.findall(block)
for node in section:
for child in node.getchildren():
if child.tag == 'target' and child.get('dev'):
result[key].append(child.get('dev'))
return result
domain = self._lookup_by_name(instance['name'])
output = {}
        # get cpu time; this might raise an exception if the method
        # is not supported by the underlying hypervisor being
        # used by libvirt
try:
cputime = domain.vcpus()[0]
for i in range(len(cputime)):
output["cpu" + str(i) + "_time"] = cputime[i][2]
except libvirt.libvirtError:
pass
# get io status
xml = domain.XMLDesc(0)
dom_io = get_io_devices(xml)
for disk in dom_io["volumes"]:
try:
                # blockStats might raise an exception if the method
                # is not supported by the underlying hypervisor being
                # used by libvirt
stats = domain.blockStats(disk)
output[disk + "_read_req"] = stats[0]
output[disk + "_read"] = stats[1]
output[disk + "_write_req"] = stats[2]
output[disk + "_write"] = stats[3]
output[disk + "_errors"] = stats[4]
except libvirt.libvirtError:
pass
for interface in dom_io["ifaces"]:
try:
                # interfaceStats might raise an exception if the method
                # is not supported by the underlying hypervisor being
                # used by libvirt
stats = domain.interfaceStats(interface)
output[interface + "_rx"] = stats[0]
output[interface + "_rx_packets"] = stats[1]
output[interface + "_rx_errors"] = stats[2]
output[interface + "_rx_drop"] = stats[3]
output[interface + "_tx"] = stats[4]
output[interface + "_tx_packets"] = stats[5]
output[interface + "_tx_errors"] = stats[6]
output[interface + "_tx_drop"] = stats[7]
except libvirt.libvirtError:
pass
output["memory"] = domain.maxMemory()
        # memoryStats might raise an exception if the method
        # is not supported by the underlying hypervisor being
        # used by libvirt
try:
mem = domain.memoryStats()
for key in mem.keys():
output["memory-" + key] = mem[key]
except (libvirt.libvirtError, AttributeError):
pass
return output
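    # Hedged example of the dict shape get_diagnostics() returns (device names
    # and numbers below are illustrative, not real output):
    #   {'cpu0_time': 17300000000, 'vda_read_req': 112, 'vda_read': 262144,
    #    'eth0_rx': 4408, 'eth0_rx_packets': 82, 'memory': 2097152,
    #    'memory-rss': 521384}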
def instance_on_disk(self, instance):
# ensure directories exist and are writable
instance_path = libvirt_utils.get_instance_path(instance)
LOG.debug(_('Checking instance files accessibility %s'), instance_path)
return os.access(instance_path, os.W_OK)
def inject_network_info(self, instance, nw_info):
self.firewall_driver.setup_basic_filtering(instance, nw_info)
def _delete_instance_files(self, instance):
# NOTE(mikal): a shim to handle this file not using instance objects
# everywhere. Remove this when that conversion happens.
context = nova_context.get_admin_context(read_deleted='yes')
inst_obj = instance_obj.Instance.get_by_uuid(context, instance['uuid'])
# NOTE(mikal): this code should be pushed up a layer when this shim is
# removed.
attempts = int(inst_obj.system_metadata.get('clean_attempts', '0'))
success = self.delete_instance_files(inst_obj)
inst_obj.system_metadata['clean_attempts'] = str(attempts + 1)
if success:
inst_obj.cleaned = True
inst_obj.save(context)
def delete_instance_files(self, instance):
target = libvirt_utils.get_instance_path(instance)
if os.path.exists(target):
LOG.info(_('Deleting instance files %s'), target,
instance=instance)
try:
shutil.rmtree(target)
except OSError as e:
LOG.error(_('Failed to cleanup directory %(target)s: '
'%(e)s'), {'target': target, 'e': e},
instance=instance)
# It is possible that the delete failed, if so don't mark the instance
# as cleaned.
if os.path.exists(target):
LOG.info(_('Deletion of %s failed'), target, instance=instance)
return False
LOG.info(_('Deletion of %s complete'), target, instance=instance)
return True
@property
def need_legacy_block_device_info(self):
return False
def default_root_device_name(self, instance, image_meta, root_bdm):
disk_bus = blockinfo.get_disk_bus_for_device_type(
CONF.libvirt.virt_type, image_meta, "disk")
cdrom_bus = blockinfo.get_disk_bus_for_device_type(
CONF.libvirt.virt_type, image_meta, "cdrom")
root_info = blockinfo.get_root_info(
CONF.libvirt.virt_type, image_meta, root_bdm, disk_bus,
cdrom_bus)
return block_device.prepend_dev(root_info['dev'])
def default_device_names_for_instance(self, instance, root_device_name,
*block_device_lists):
ephemerals, swap, block_device_mapping = block_device_lists[:3]
blockinfo.default_device_names(CONF.libvirt.virt_type,
nova_context.get_admin_context(),
instance, root_device_name,
ephemerals, swap,
block_device_mapping)
class HostState(object):
"""Manages information about the compute node through libvirt."""
def __init__(self, driver):
super(HostState, self).__init__()
self._stats = {}
self.driver = driver
self.update_status()
def get_host_stats(self, refresh=False):
"""Return the current state of the host.
        If 'refresh' is True, update the stats first.
"""
if refresh or not self._stats:
self.update_status()
return self._stats
def update_status(self):
"""Retrieve status info from libvirt."""
def _get_disk_available_least():
"""Return total real disk available least size.
The size of available disk, when block_migration command given
disk_over_commit param is FALSE.
The size that deducted real instance disk size from the total size
of the virtual disk of all instances.
"""
disk_free_gb = disk_info_dict['free']
disk_over_committed = (self.driver.
get_disk_over_committed_size_total())
# Disk available least size
available_least = disk_free_gb * units.Gi - disk_over_committed
return (available_least / units.Gi)
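        # Hedged worked example of _get_disk_available_least: with 100 GiB
        # free and 30 GiB over-committed, it returns (100*Gi - 30*Gi) / Gi == 70.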
LOG.debug(_("Updating host stats"))
disk_info_dict = self.driver.get_local_gb_info()
data = {}
#NOTE(dprince): calling capabilities before getVersion works around
# an initialization issue with some versions of Libvirt (1.0.5.5).
# See: https://bugzilla.redhat.com/show_bug.cgi?id=1000116
# See: https://bugs.launchpad.net/nova/+bug/1215593
data["supported_instances"] = \
self.driver.get_instance_capabilities()
data["vcpus"] = self.driver.get_vcpu_total()
data["memory_mb"] = self.driver.get_memory_mb_total()
data["local_gb"] = disk_info_dict['total']
data["vcpus_used"] = self.driver.get_vcpu_used()
data["memory_mb_used"] = self.driver.get_memory_mb_used()
data["local_gb_used"] = disk_info_dict['used']
data["hypervisor_type"] = self.driver.get_hypervisor_type()
data["hypervisor_version"] = self.driver.get_hypervisor_version()
data["hypervisor_hostname"] = self.driver.get_hypervisor_hostname()
data["cpu_info"] = self.driver.get_cpu_info()
data['disk_available_least'] = _get_disk_available_least()
data['pci_passthrough_devices'] = \
self.driver.get_pci_passthrough_devices()
self._stats = data
return data
|
{
"content_hash": "8314b21a05f05160578692483f2b6eec",
"timestamp": "",
"source": "github",
"line_count": 5250,
"max_line_length": 79,
"avg_line_length": 42.82495238095238,
"alnum_prop": 0.5392050028688214,
"repo_name": "shhui/nova",
"id": "bf771be4a261878110f359064d76898564940dab",
"size": "225758",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nova/virt/libvirt/driver.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ApacheConf",
"bytes": "5874"
},
{
"name": "Diff",
"bytes": "23363"
},
{
"name": "Groff",
"bytes": "112"
},
{
"name": "Python",
"bytes": "13937011"
},
{
"name": "Shell",
"bytes": "48928"
},
{
"name": "Smarty",
"bytes": "595873"
}
],
"symlink_target": ""
}
|
"""
Kubernetes
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: release-1.25
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from kubernetes.client.configuration import Configuration
class V1ContainerStateRunning(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'started_at': 'datetime'
}
attribute_map = {
'started_at': 'startedAt'
}
def __init__(self, started_at=None, local_vars_configuration=None): # noqa: E501
"""V1ContainerStateRunning - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._started_at = None
self.discriminator = None
if started_at is not None:
self.started_at = started_at
@property
def started_at(self):
"""Gets the started_at of this V1ContainerStateRunning. # noqa: E501
Time at which the container was last (re-)started # noqa: E501
:return: The started_at of this V1ContainerStateRunning. # noqa: E501
:rtype: datetime
"""
return self._started_at
@started_at.setter
def started_at(self, started_at):
"""Sets the started_at of this V1ContainerStateRunning.
Time at which the container was last (re-)started # noqa: E501
:param started_at: The started_at of this V1ContainerStateRunning. # noqa: E501
:type: datetime
"""
self._started_at = started_at
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1ContainerStateRunning):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1ContainerStateRunning):
return True
return self.to_dict() != other.to_dict()
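# Hedged usage sketch of the model above (the timestamp is an arbitrary example):
#   import datetime
#   state = V1ContainerStateRunning(started_at=datetime.datetime(2023, 1, 1))
#   state.to_dict()   # => {'started_at': datetime.datetime(2023, 1, 1, 0, 0)}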
|
{
"content_hash": "0875820e1e49f4bd5ebc51674fdfdc78",
"timestamp": "",
"source": "github",
"line_count": 120,
"max_line_length": 124,
"avg_line_length": 30.141666666666666,
"alnum_prop": 0.5792092894664086,
"repo_name": "kubernetes-client/python",
"id": "b700176cdcf8432abf18d2bcf357324641a7e2c2",
"size": "3634",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "kubernetes/client/models/v1_container_state_running.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "356"
},
{
"name": "Python",
"bytes": "11454299"
},
{
"name": "Shell",
"bytes": "43108"
}
],
"symlink_target": ""
}
|
from __future__ import print_function
from string import Template
# run.py by default will delete cloudformation templates and buckets created by this script and create everything from scratch.
# other parameters (yet to be defined) will roll out a smaller change
import boto3
import botocore
import os
import argparse
import re
import tempfile
import shutil
import importlib
import cformation
from cformation import *
import json
# in the cformation directory
CFORMATION_TEMPLATES = [cformation.master, cformation.ingest, cformation.elasticsearch, cformation.firehose, cformation.codepipeline, cformation.api_lambda]
GIT_PERSONAL_ACCESS_TOKEN = 'GIT_PERSONAL_ACCESS_TOKEN'
SECRETS = set(['TWITTER_CONSUMER_KEY', 'TWITTER_CONSUMER_SECRET', 'TWITTER_ACCESS_TOKEN', 'TWITTER_ACCESS_SECRET', GIT_PERSONAL_ACCESS_TOKEN])
# command line parsing
parser = argparse.ArgumentParser(description="""Look at the switches below and notice the commands.
You can run any of these commands individually.
If none of the commands are specified then following commands will be run in order: -code, -codeupload, -template, -update.
Before the create or update commands are run the twitter credentials will be verified.
If twitter credential verification fails, fix the problem by setting the env variables and using the -p switch.
In addition it can be useful to execute -template followed by -update to make changes to the templates then update the existing stack.
The -name parameter can be used to create multiple stacks, you must use the same name to delete, update, etc.
""")
parser.add_argument('-delete', action='store_true', help='command: delete the buckets and the stack')
parser.add_argument('-code', action='store_true', help='command: generate code in temporary directory')
parser.add_argument('-codeupload', action='store_true', help='command: upload the code generated by the code command, create s3 bucket, upload code s3 bucket')
parser.add_argument('-template', action='store_true', help='command: generate cloudformation templates in the temporary directory, create cfn bucket, put the templates into the cfn bucket')
parser.add_argument('-create', action='store_true', help='command: create the stack from the cloud formation templates created with -template')
parser.add_argument('-update', action='store_true', help='command: update the stack (normally run with -template)')
parser.add_argument('-creds', action='store_true', help='command: store secrets, set these environment variables then use the -creds option to store them:' + str(SECRETS))
parser.add_argument('-credsprint', action='store_true', help='command: print the stored secrets; use after -creds to see the results')
parser.add_argument('-n', action='store', default='bigimage', help='name any global objects, like s3 buckets, with this name')
parser.add_argument('-s', action='store_true', help='run silently')
args = parser.parse_args()
# globals for getting to boto3
clientSts = boto3.client('sts')
clientCloudformation = boto3.client('cloudformation')
clientS3 = boto3.client('s3')
s3 = boto3.resource('s3')
ssm = boto3.client('ssm')
# args
ARG_SILENT = args.s
ARG_NAME = args.n
# commands
ARG_DELETE = args.delete
ARG_CODE = args.code
ARG_CODEUPLOAD = args.codeupload
ARG_TEMPLATE = args.template
ARG_CREATE = args.create
ARG_UPDATE = args.update
ARG_CREDS = args.creds
ARG_CREDSPRINT = args.credsprint
if (not ARG_DELETE) and (not ARG_CODE) and (not ARG_TEMPLATE) and (not ARG_CREATE) and (not ARG_UPDATE) and (not ARG_CREDS) and (not ARG_CREDSPRINT):
ARG_CODE = True
ARG_CODEUPLOAD = True
ARG_TEMPLATE = True
ARG_CREATE = True
ARG_UPDATE = True
# constants
STACK_NAME=ARG_NAME
ACCOUNT_ID = clientSts.get_caller_identity()['Account']
S3_TEMPLATE_BUCKET = STACK_NAME + 'cfn' + ACCOUNT_ID # cloudformation template are generated in this script and kept here
S3_CODE_BUCKET=STACK_NAME + 'code' + ACCOUNT_ID # code is generated in this script and kept here
REGION = boto3.session.Session().region_name
MASTER_TEMPLATE='master.cfn.json'
def silentPrint(*args):
if not ARG_SILENT:
print(*args)
class NamedSecrets:
    '''Manage a list of secrets for a specific stack name (see -n parameter) in the AWS parameter store.
    Invoke as secrets = NamedSecrets(name, secretSet), where name is the stack name and secretSet is the set of secrets to track.
    '''
def __init__(self, name, secrets):
self.name = name
self.secrets = secrets
def _namedSecret(self, secret):
"return a named secret by prepending the name (see -n parameter) to the secret"
return self.name + "_" + secret
def _namedSecrets(self):
"return a set of named twitter variables"
ret = set()
for secret in self.secrets:
ret.add(self._namedSecret(secret))
return ret
def readVerifyRememberStoredSecrets(self):
"veriy that all of the secrets have been stored"
# only need to call this function one time, the results are remembered
if hasattr(self, 'namedSecretValues'):
return True
namedSecrets = list(self._namedSecrets())
ret = ssm.get_parameters(Names=namedSecrets, WithDecryption=True)
namedSecretValues = {}
parameters = ret[u'Parameters']
for parameter in parameters:
namedSecretValues[parameter['Name']] = parameter['Value']
invalidParameters = ret['InvalidParameters']
if len(invalidParameters) > 0:
print("missing system parameters:", str(invalidParameters))
return False
returnedNamedSecrets = set(namedSecretValues.keys())
if returnedNamedSecrets != self._namedSecrets():
print("all secrets not stored, crazy, secrets returned:", returnedNamedSecrets, "secrets expected:", self._namedSecrets())
return False
self.namedSecretValues = namedSecretValues
return True
def getRememberedSecretValue(self, secret):
"call after readVerifyRememberStoredSecrets() to return the value associated with a secret"
return self.namedSecretValues[self._namedSecret(secret)]
def storeSecrets(self):
"store the secrets as Simple System Management System Parameters, return false if something goes wrong"
ret = True
parameterStore = {}
for secret in self.secrets:
            if secret not in os.environ:
print(secret, "not in environment")
ret = False
continue
value = os.environ[secret]
if value == "":
print(secret, "in environment but does not have a vlue")
ret = False
continue
parameterStore[self._namedSecret(secret)] = value
if not ret:
return False
for secret in parameterStore:
value = parameterStore[secret]
ssm.put_parameter(Name=secret, Value=value, Type='SecureString', Overwrite=True)
if not self.readVerifyRememberStoredSecrets():
return False
        if not (parameterStore == self.namedSecretValues):
            print("values stored do not match the environment variables")
            return False
return True
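# Hedged usage sketch of NamedSecrets (requires AWS credentials and the
# relevant environment variables to be set; values are illustrative):
#   secrets = NamedSecrets('bigimage', SECRETS)
#   if secrets.storeSecrets() and secrets.readVerifyRememberStoredSecrets():
#       token = secrets.getRememberedSecretValue(GIT_PERSONAL_ACCESS_TOKEN)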
def masterTemplateParameters(secrets):
'return the master template parameters'
gitParameterValue = secrets.getRememberedSecretValue(GIT_PERSONAL_ACCESS_TOKEN)
MASTER_TEMPLATE_PARAMETERS=[{
'ParameterKey': 'TemplateBucket',
'ParameterValue': '{}/{}'.format(clientS3.meta.endpoint_url, S3_TEMPLATE_BUCKET),
'UsePreviousValue': False
},{
'ParameterKey': 'CodeBucket',
'ParameterValue': S3_CODE_BUCKET,
'UsePreviousValue': False
},{
'ParameterKey': 'GitPersonalAccessToken',
'ParameterValue': gitParameterValue,
'UsePreviousValue': False
}]
return MASTER_TEMPLATE_PARAMETERS
# each module must have id() and template() functions.
# id() will return the name of the json file that should be generated
# template() will return the troposphere template
TROPHOSPHERE_NAME_TEMPLATE = {}
for moduleName in CFORMATION_TEMPLATES:
TROPHOSPHERE_NAME_TEMPLATE.update({moduleName.id(): moduleName.template(STACK_NAME)})
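# Hedged sketch of a conforming cformation module (the real modules use
# troposphere; the module body below is an assumption, not project code):
#   def id():
#       return 'example.cfn.json'
#   def template(stack_name):
#       from troposphere import Template
#       return Template()   # must expose to_json(), as used by generateCfn()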
# directories that contain a makeawszip command that will create a zip
MAKEAWSZIP_DIRS = ['python-v1']
def bucketCreate(bucket):
bucket.create(CreateBucketConfiguration={'LocationConstraint': REGION}, GrantRead='uri="http://acs.amazonaws.com/groups/global/AllUsers"')
def bucketDelete(bucketName):
    'delete all keys in the bucket, then delete the bucket itself if it exists; return the Bucket object'
silentPrint('deleting bucket:', bucketName)
bucket = s3.Bucket(bucketName)
try:
for key in bucket.objects.all():
key.delete()
silentPrint('delete key, bucket:', key.bucket_name, 'key:', key.key)
bucket.delete()
silentPrint('deleted bucket:', bucket.name)
except:
pass
return bucket
def bucketExists(bucket):
'return True if the bucket exists'
return bucket in s3.buckets.all()
def bucketNew(bucket):
'create a new bucket'
bucketCreate(bucket)
bucket_policy = s3.BucketPolicy(bucket.name)
policy=Template('''{
"Version": "2012-10-17",
"Statement": [
{
"Sid": "AddPerm",
"Effect": "Allow",
"Principal": "*",
"Action": "s3:GetObject",
"Resource": "arn:aws:s3:::$bucket/*"
}
]
}
''').substitute(bucket=S3_TEMPLATE_BUCKET)
silentPrint("s3 teamplate bucket policy:", policy)
bucket_policy.put(Policy=policy)
silentPrint('create bucket:', bucket.name)
return bucket
def bucketPopulate(tempDir, bucket):
'populate the bucket with the cf.json files in this directory'
for key in TROPHOSPHERE_NAME_TEMPLATE:
localFile = os.path.join(tempDir, key)
silentPrint("upload:", localFile, "url:", s3Url(bucket, key))
bucket.upload_file(localFile, key)
def generateCfn(outputDir):
    'generate the cloudformation templates for every template registered in TROPHOSPHERE_NAME_TEMPLATE'
for key, template in TROPHOSPHERE_NAME_TEMPLATE.iteritems():
outputFile = open(os.path.join(outputDir, key), "w")
outputFile.write(template.to_json())
def stackWait(stackName, waitName):
silentPrint("waiting for cloudformation stack to be", waitName, "stack:", stackName)
waiter = clientCloudformation.get_waiter(waitName)
waiter.wait(StackName=stackName)
def stackCreateWait(stackName):
stackWait(stackName, 'stack_create_complete')
def stackUpdateWait(stackName):
stackWait(stackName, 'stack_update_complete')
def stackDeleteWait(stackName):
stackWait(stackName, 'stack_delete_complete')
def stackDelete(stackName):
try:
silentPrint("delete stack:", stackName)
clientCloudformation.delete_stack(StackName=stackName)
stackDeleteWait(stackName)
except:
return
def s3Url(s3Bucket, s3Key):
return '{}/{}/{}'.format(clientS3.meta.endpoint_url, s3Bucket.name, s3Key)
def stackCreate(stackName, templateBucket, s3MasterKey, cfnParameters):
url = s3Url(templateBucket, s3MasterKey)
response = clientCloudformation.create_stack(
StackName=stackName,
TemplateURL=url,
Parameters=cfnParameters,
#DisableRollback=True|False,
#TimeoutInMinutes=123,
#NotificationARNs=[
# 'string',
#],
Capabilities=[
'CAPABILITY_IAM'
],
#ResourceTypes=[
# 'string',
#],
#RoleARN='string',
#OnFailure='DO_NOTHING'|'ROLLBACK'|'DELETE',
#StackPolicyBody='string',
#StackPolicyURL='string',
Tags=[
{
'Key': 'project',
'Value': STACK_NAME,
},
]
)
stackCreateWait(stackName)
def stackUpdate(stackName, templateBucket, s3MasterKey, cfnParameters):
url = s3Url(templateBucket, s3MasterKey)
response = clientCloudformation.update_stack(
StackName=stackName,
TemplateURL=url,
UsePreviousTemplate=False,
# StackPolicyDuringUpdateBody
Parameters=cfnParameters,
#DisableRollback=True|False,
#TimeoutInMinutes=123,
#NotificationARNs=[
# 'string',
#],
Capabilities=[
'CAPABILITY_IAM'
],
#ResourceTypes=[
# 'string',
#],
#RoleARN='string',
#OnFailure='DO_NOTHING'|'ROLLBACK'|'DELETE',
#StackPolicyBody='string',
#StackPolicyURL='string',
Tags=[
{
'Key': 'project',
'Value': STACK_NAME,
},
]
)
stackUpdateWait(stackName)
def stackCreateChangeSet(stackName, templateBucket, s3MasterKey, cfnParameters):
url = s3Url(templateBucket, s3MasterKey)
response = clientCloudformation.create_change_set(
ChangeSetName='changeSetName',
# ClientToken='string',
#Description='string',
ChangeSetType='UPDATE',
#
StackName=stackName,
TemplateURL=url,
UsePreviousTemplate=False,
Parameters=cfnParameters,
#TimeoutInMinutes=123,
#NotificationARNs=[
# 'string',
#],
Capabilities=[
'CAPABILITY_IAM'
],
#ResourceTypes=[
# 'string',
#],
#RoleARN='string',
#OnFailure='DO_NOTHING'|'ROLLBACK'|'DELETE',
#StackPolicyBody='string',
#StackPolicyURL='string',
)
stackCreateWait(stackName)
import imp
def uploadProjectCode(bucket, inputDirBasename, outputDir):
'upload generated code from a project'
file = os.path.join(outputDir, inputDirBasename + ".zip")
key = os.path.basename(file)
silentPrint("upload:", file, "key:", key, "url:", s3Url(bucket, key))
# upload file
bucket.upload_file(file, key)
def uploadCode(outputDir, bucketName):
bucket = s3.Bucket(bucketName)
try:
bucketCreate(bucket)
bucket_versioning = s3.BucketVersioning(bucketName)
silentPrint("eable bucket_versioning:", bucket_versioning)
bucket_versioning.enable()
except:
pass
for inputDirBasename in MAKEAWSZIP_DIRS:
uploadProjectCode(bucket, inputDirBasename, outputDir)
def generateProjectCode(inputDirBasename, outputDir):
'call the inputDirBasename/build/build.py script to create a zip file to upload'
buildPath = os.path.join(os.path.dirname(os.path.dirname(os.path.realpath(__file__))), inputDirBasename, "build", "build.py")
silentPrint('building code:', buildPath)
buildModule = imp.load_source("build", buildPath)
zip_file_name = os.path.join(outputDir, inputDirBasename + ".zip")
hash = buildModule.build(zip_file_name)
print(hash)
def generateCode(outputDir):
for inputDirBasename in MAKEAWSZIP_DIRS:
generateProjectCode(inputDirBasename, outputDir)
# commands --------------------------
secrets = NamedSecrets(ARG_NAME, SECRETS)
if ARG_CREDS:
silentPrint("command: creds")
if not secrets.storeSecrets():
quit()
if ARG_CREDSPRINT:
if not secrets.readVerifyRememberStoredSecrets():
quit()
print(secrets.namedSecretValues)
if ARG_DELETE:
silentPrint("command: delete")
stackDelete(STACK_NAME)
bucketDelete(S3_TEMPLATE_BUCKET)
bucketDelete(S3_CODE_BUCKET)
if False:
tempDir = tempfile.mkdtemp()
else:
tempDir = os.path.abspath("build")
silentPrint("temporary directory:", tempDir)
try:
shutil.rmtree(tempDir)
except:
pass
os.mkdir(tempDir)
# generate new code zips
if ARG_CODE:
silentPrint("command: code")
generateCode(tempDir)
if ARG_CODEUPLOAD:
silentPrint("command: codeupload")
uploadCode(tempDir, S3_CODE_BUCKET)
# populate template bucket with fresh templates. Generate them in the temporary directory then copy them to s3
templateBucket = s3.Bucket(S3_TEMPLATE_BUCKET)
if ARG_TEMPLATE:
silentPrint("command: template")
if not bucketExists(templateBucket):
bucketNew(templateBucket)
generateCfn(tempDir)
bucketPopulate(tempDir, templateBucket)
# both set then create if it does not exist and update otherwise
if ARG_CREATE and ARG_UPDATE:
try:
stack = clientCloudformation.describe_stacks(StackName=ARG_NAME)
silentPrint("stack exists, update stack")
ARG_CREATE = False # found the stack, update do not create
except botocore.exceptions.ClientError as ex:
if ("Stack with id" in str(ex)) and ("does not exist" in str(ex)):
silentPrint("stack does not exist, create stack")
ARG_UPDATE = False # stack does not exist, create the stack
else:
raise ex
if ARG_CREATE:
silentPrint("command: create")
if not secrets.readVerifyRememberStoredSecrets():
quit()
stackCreate(STACK_NAME, templateBucket, MASTER_TEMPLATE, masterTemplateParameters(secrets))
if ARG_UPDATE:
silentPrint("command: update")
if not secrets.readVerifyRememberStoredSecrets():
quit()
stackUpdate(STACK_NAME, templateBucket, MASTER_TEMPLATE, masterTemplateParameters(secrets))
#if ARG_CHANGE_SET:
# print("not implemented yet")
# quit()
# stackCreateChangeSet(STACK_NAME, templateBucket, MASTER_TEMPLATE, masterTemplateParameters(secrets))
# quit()
print("temporary directory:", tempDir)
|
{
"content_hash": "564c59da492948c6f69c4a16616d07e3",
"timestamp": "",
"source": "github",
"line_count": 488,
"max_line_length": 189,
"avg_line_length": 36.17622950819672,
"alnum_prop": 0.6730486008836525,
"repo_name": "powellquiring/bigimage",
"id": "c004333794df6f410417558b3dc0607903dd5a8a",
"size": "17676",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cfm/run.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "627"
},
{
"name": "HTML",
"bytes": "1460"
},
{
"name": "JavaScript",
"bytes": "11576"
},
{
"name": "Python",
"bytes": "85439"
},
{
"name": "Shell",
"bytes": "1092"
}
],
"symlink_target": ""
}
|
from flask.globals import _app_ctx_stack, _request_ctx_stack
from werkzeug.exceptions import NotFound
from werkzeug.urls import url_parse
def route_from(url, method = None):
appctx = _app_ctx_stack.top
reqctx = _request_ctx_stack.top
if appctx is None:
raise RuntimeError('Attempted to match a URL without the '
'application context being pushed. This has to be '
'executed when application context is available.')
if reqctx is not None:
url_adapter = reqctx.url_adapter
else:
url_adapter = appctx.url_adapter
if url_adapter is None:
raise RuntimeError('Application was not able to create a URL '
'adapter for request independent URL matching. '
'You might be able to fix this by setting '
'the SERVER_NAME config variable.')
parsed_url = url_parse(url)
    if parsed_url.netloc != "" and parsed_url.netloc != url_adapter.server_name:
raise NotFound()
return url_adapter.match(parsed_url.path, method)
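# Hedged usage sketch (the endpoint name and route are assumptions for
# illustration; MapAdapter.match returns an (endpoint, view_args) tuple):
#   with app.test_request_context():
#       endpoint, view_args = route_from('/user/42', method='GET')
#       # e.g. ('user_detail', {'user_id': 42}) for @app.route('/user/<int:user_id>')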
|
{
"content_hash": "190ce620af4aa7ddd352135f627b5f02",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 82,
"avg_line_length": 40.833333333333336,
"alnum_prop": 0.6744897959183673,
"repo_name": "haisum/flask-starting-template",
"id": "f6c2ff1229d37e61dd8f534e509976d6b9acd535",
"size": "980",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "core/url.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "120866"
},
{
"name": "JavaScript",
"bytes": "58274"
},
{
"name": "Python",
"bytes": "11717"
}
],
"symlink_target": ""
}
|
import json
import requests
from collections import OrderedDict
from redash.query_runner import *
# TODO: make this more general and move into __init__.py
class ResultSet(object):
def __init__(self):
self.columns = OrderedDict()
self.rows = []
def add_row(self, row):
for key in row.keys():
self.add_column(key)
self.rows.append(row)
def add_column(self, column, column_type=TYPE_STRING):
if column not in self.columns:
self.columns[column] = {'name': column, 'type': column_type, 'friendly_name': column}
def to_json(self):
return json.dumps({'rows': self.rows, 'columns': self.columns.values()})
def parse_issue(issue):
result = OrderedDict()
result['key'] = issue['key']
for k, v in issue['fields'].iteritems():
if k.startswith('customfield_'):
continue
if isinstance(v, dict):
if 'key' in v:
result['{}_key'.format(k)] = v['key']
if 'name' in v:
result['{}_name'.format(k)] = v['name']
if k in v:
result[k] = v[k]
if 'watchCount' in v:
result[k] = v['watchCount']
# elif isinstance(v, list):
# pass
else:
result[k] = v
return result
def parse_issues(data):
results = ResultSet()
for issue in data['issues']:
results.add_row(parse_issue(issue))
return results
def parse_count(data):
results = ResultSet()
results.add_row({'count': data['total']})
return results
class JiraJQL(BaseQueryRunner):
@classmethod
def configuration_schema(cls):
return {
'type': 'object',
'properties': {
'url': {
'type': 'string',
'title': 'JIRA URL'
},
'username': {
'type': 'string',
},
'password': {
'type': 'string'
}
},
'required': ['url', 'username', 'password'],
'secret': ['password']
}
@classmethod
def name(cls):
return "JIRA (JQL)"
@classmethod
def annotate_query(cls):
return False
def __init__(self, configuration):
super(JiraJQL, self).__init__(configuration)
self.syntax = 'json'
def run_query(self, query, user):
jql_url = '{}/rest/api/2/search'.format(self.configuration["url"])
try:
query = json.loads(query)
query_type = query.pop('queryType', 'select')
if query_type == 'count':
query['maxResults'] = 1
query['fields'] = ''
response = requests.get(jql_url, params=query, auth=(self.configuration.get('username'), self.configuration.get('password')))
if response.status_code == 401 or response.status_code == 403:
return None, "Authentication error. Please check username/password."
if response.status_code != 200:
return None, "JIRA returned unexpected status code ({})".format(response.status_code)
data = response.json()
if query_type == 'count':
results = parse_count(data)
else:
results = parse_issues(data)
return results.to_json(), None
except KeyboardInterrupt:
return None, "Query cancelled by user."
register(JiraJQL)
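# Hedged examples of query documents this runner accepts ('queryType' is popped
# off; every other key is passed straight through as a JIRA search parameter):
#   {"jql": "project = FOO AND status = Open", "maxResults": 50}
#   {"jql": "project = FOO", "queryType": "count"}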
|
{
"content_hash": "e32b2d62a42bc632f932ca755f8e21db",
"timestamp": "",
"source": "github",
"line_count": 135,
"max_line_length": 137,
"avg_line_length": 26.28148148148148,
"alnum_prop": 0.5208568207440811,
"repo_name": "guaguadev/redash",
"id": "f467134ccc574f0eb07923017f7f74821fee6d85",
"size": "3548",
"binary": false,
"copies": "1",
"ref": "refs/heads/guagua",
"path": "redash/query_runner/jql.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "239783"
},
{
"name": "HTML",
"bytes": "121423"
},
{
"name": "JavaScript",
"bytes": "279730"
},
{
"name": "Makefile",
"bytes": "955"
},
{
"name": "Nginx",
"bytes": "577"
},
{
"name": "Python",
"bytes": "501609"
},
{
"name": "Ruby",
"bytes": "709"
},
{
"name": "Shell",
"bytes": "43388"
}
],
"symlink_target": ""
}
|
from openstackclient.tests.unit.volume.v2 import fakes as volume_fakes
from openstackclient.volume.v2 import volume_backend
class TestShowVolumeCapability(volume_fakes.TestVolume):
"""Test backend capability functionality."""
# The capability to be listed
capability = volume_fakes.FakeCapability.create_one_capability()
def setUp(self):
super(TestShowVolumeCapability, self).setUp()
# Get a shortcut to the capability Mock
self.capability_mock = self.app.client_manager.volume.capabilities
self.capability_mock.get.return_value = self.capability
# Get the command object to test
self.cmd = volume_backend.ShowCapability(self.app, None)
def test_capability_show(self):
arglist = [
'fake',
]
verifylist = [
('host', 'fake'),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
# In base command class Lister in cliff, abstract method take_action()
# returns a tuple containing the column names and an iterable
# containing the data to be listed.
columns, data = self.cmd.take_action(parsed_args)
expected_columns = [
'Title',
'Key',
'Type',
'Description',
]
# confirming if all expected columns are present in the result.
self.assertEqual(expected_columns, columns)
capabilities = [
'Compression',
'Replication',
'QoS',
'Thin Provisioning',
]
# confirming if all expected values are present in the result.
for cap in data:
self.assertTrue(cap[0] in capabilities)
# checking if proper call was made to get capabilities
self.capability_mock.get.assert_called_with(
'fake',
)
class TestListVolumePool(volume_fakes.TestVolume):
"""Tests for volume backend pool listing."""
# The pool to be listed
pools = volume_fakes.FakePool.create_one_pool()
def setUp(self):
super(TestListVolumePool, self).setUp()
self.pool_mock = self.app.client_manager.volume.pools
self.pool_mock.list.return_value = [self.pools]
# Get the command object to test
self.cmd = volume_backend.ListPool(self.app, None)
def test_pool_list(self):
arglist = []
verifylist = []
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
# In base command class Lister in cliff, abstract method take_action()
# returns a tuple containing the column names and an iterable
# containing the data to be listed.
columns, data = self.cmd.take_action(parsed_args)
expected_columns = [
'Name',
]
# confirming if all expected columns are present in the result.
self.assertEqual(expected_columns, columns)
datalist = ((
self.pools.name,
), )
# confirming if all expected values are present in the result.
self.assertEqual(datalist, tuple(data))
# checking if proper call was made to list pools
self.pool_mock.list.assert_called_with(
detailed=False,
)
# checking if long columns are present in output
self.assertNotIn("total_volumes", columns)
self.assertNotIn("storage_protocol", columns)
    def test_pool_list_with_long_option(self):
arglist = [
'--long'
]
verifylist = [
('long', True)
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
# In base command class Lister in cliff, abstract method take_action()
# returns a tuple containing the column names and an iterable
# containing the data to be listed.
columns, data = self.cmd.take_action(parsed_args)
expected_columns = [
'Name',
'Protocol',
'Thick',
'Thin',
'Volumes',
'Capacity',
'Allocated',
'Max Over Ratio',
]
# confirming if all expected columns are present in the result.
self.assertEqual(expected_columns, columns)
datalist = ((
self.pools.name,
self.pools.storage_protocol,
self.pools.thick_provisioning_support,
self.pools.thin_provisioning_support,
self.pools.total_volumes,
self.pools.total_capacity_gb,
self.pools.allocated_capacity_gb,
self.pools.max_over_subscription_ratio,
), )
# confirming if all expected values are present in the result.
self.assertEqual(datalist, tuple(data))
self.pool_mock.list.assert_called_with(
detailed=True,
)
|
{
"content_hash": "fd64d380c54d81f2e0458f71628d315f",
"timestamp": "",
"source": "github",
"line_count": 154,
"max_line_length": 78,
"avg_line_length": 31.474025974025974,
"alnum_prop": 0.6007839900969671,
"repo_name": "dtroyer/python-openstackclient",
"id": "db1886608734a1d64146e326051d356b2f6716a7",
"size": "5415",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "openstackclient/tests/unit/volume/v2/test_volume_backend.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "4040230"
},
{
"name": "Shell",
"bytes": "299"
}
],
"symlink_target": ""
}
|
import mock
from openstackclient.identity.v3 import token
from openstackclient.tests.identity.v3 import fakes as identity_fakes
class TestToken(identity_fakes.TestIdentityv3):
def setUp(self):
super(TestToken, self).setUp()
# Get a shortcut to the Service Catalog Mock
self.sc_mock = mock.Mock()
self.app.client_manager.auth_ref = mock.Mock()
self.app.client_manager.auth_ref.service_catalog = self.sc_mock
class TestTokenIssue(TestToken):
def setUp(self):
super(TestTokenIssue, self).setUp()
self.cmd = token.IssueToken(self.app, None)
def test_token_issue_with_project_id(self):
arglist = []
verifylist = []
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
self.sc_mock.get_token.return_value = \
identity_fakes.TOKEN_WITH_PROJECT_ID
# DisplayCommandBase.take_action() returns two tuples
columns, data = self.cmd.take_action(parsed_args)
self.sc_mock.get_token.assert_called_with()
collist = ('expires', 'id', 'project_id', 'user_id')
self.assertEqual(collist, columns)
datalist = (
identity_fakes.token_expires,
identity_fakes.token_id,
identity_fakes.project_id,
identity_fakes.user_id,
)
self.assertEqual(datalist, data)
def test_token_issue_with_domain_id(self):
arglist = []
verifylist = []
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
self.sc_mock.get_token.return_value = \
identity_fakes.TOKEN_WITH_DOMAIN_ID
# DisplayCommandBase.take_action() returns two tuples
columns, data = self.cmd.take_action(parsed_args)
self.sc_mock.get_token.assert_called_with()
collist = ('domain_id', 'expires', 'id', 'user_id')
self.assertEqual(collist, columns)
datalist = (
identity_fakes.domain_id,
identity_fakes.token_expires,
identity_fakes.token_id,
identity_fakes.user_id,
)
self.assertEqual(datalist, data)
|
{
"content_hash": "0ca9712ddba587ec0a49775a1fd0ef32",
"timestamp": "",
"source": "github",
"line_count": 67,
"max_line_length": 71,
"avg_line_length": 31.940298507462686,
"alnum_prop": 0.6242990654205608,
"repo_name": "sjsucohort6/openstack",
"id": "6ad4845da703a928c17aff74b0a9bf91b7d15dba",
"size": "2737",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "python/venv/lib/python2.7/site-packages/openstackclient/tests/identity/v3/test_token.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "410"
},
{
"name": "CSS",
"bytes": "144982"
},
{
"name": "FreeMarker",
"bytes": "14104"
},
{
"name": "HTML",
"bytes": "8308"
},
{
"name": "Java",
"bytes": "243125"
},
{
"name": "JavaScript",
"bytes": "1493715"
},
{
"name": "Python",
"bytes": "16921939"
},
{
"name": "Shell",
"bytes": "13926"
}
],
"symlink_target": ""
}
|
from m5.params import *
from m5.proxy import *
from VirtIO import VirtIODeviceBase
from Serial import SerialDevice
class VirtIOConsole(VirtIODeviceBase):
type = 'VirtIOConsole'
cxx_header = 'dev/virtio/console.hh'
qRecvSize = Param.Unsigned(16, "Receive queue size (descriptors)")
qTransSize = Param.Unsigned(16, "Transmit queue size (descriptors)")
device = Param.SerialDevice("Serial device attached to this device")
|
{
"content_hash": "dd03ace0bb38821fed542320db823b37",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 72,
"avg_line_length": 34,
"alnum_prop": 0.751131221719457,
"repo_name": "TUD-OS/gem5-dtu",
"id": "bce5e1de2451a5f69cffb476ae4f49475c637f82",
"size": "2565",
"binary": false,
"copies": "3",
"ref": "refs/heads/dtu-mmu",
"path": "src/dev/virtio/VirtIOConsole.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Assembly",
"bytes": "648342"
},
{
"name": "Awk",
"bytes": "3386"
},
{
"name": "C",
"bytes": "1717604"
},
{
"name": "C++",
"bytes": "35149040"
},
{
"name": "CMake",
"bytes": "79529"
},
{
"name": "Emacs Lisp",
"bytes": "1969"
},
{
"name": "Forth",
"bytes": "15790"
},
{
"name": "HTML",
"bytes": "136898"
},
{
"name": "Java",
"bytes": "3179"
},
{
"name": "M4",
"bytes": "75007"
},
{
"name": "Makefile",
"bytes": "68265"
},
{
"name": "Objective-C",
"bytes": "24714"
},
{
"name": "Perl",
"bytes": "33696"
},
{
"name": "Python",
"bytes": "6073714"
},
{
"name": "Roff",
"bytes": "8783"
},
{
"name": "SWIG",
"bytes": "173"
},
{
"name": "Scala",
"bytes": "14236"
},
{
"name": "Shell",
"bytes": "101649"
},
{
"name": "VBA",
"bytes": "2884"
},
{
"name": "Vim Script",
"bytes": "4335"
},
{
"name": "sed",
"bytes": "3927"
}
],
"symlink_target": ""
}
|
from waitress.server import create_server
import logging
def serve(app, **kw):
_server = kw.pop('_server', create_server) # test shim
_quiet = kw.pop('_quiet', False) # test shim
_profile = kw.pop('_profile', False) # test shim
if not _quiet: # pragma: no cover
# idempotent if logging has already been set up
logging.basicConfig()
server = _server(app, **kw)
if not _quiet: # pragma: no cover
print('serving on http://%s:%s' % (server.effective_host,
server.effective_port))
if _profile: # pragma: no cover
profile('server.run()', globals(), locals(), (), False)
else:
server.run()
def serve_paste(app, global_conf, **kw):
serve(app, **kw)
return 0
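# Hedged usage sketch: serving a minimal WSGI app (host/port are arbitrary):
#   def app(environ, start_response):
#       start_response('200 OK', [('Content-Type', 'text/plain')])
#       return [b'Hello world']
#   serve(app, host='127.0.0.1', port=8080)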
def profile(cmd, globals, locals, sort_order, callers): # pragma: no cover
# runs a command under the profiler and print profiling output at shutdown
import os
import profile
import pstats
import tempfile
fd, fn = tempfile.mkstemp()
try:
profile.runctx(cmd, globals, locals, fn)
stats = pstats.Stats(fn)
stats.strip_dirs()
# calls,time,cumulative and cumulative,calls,time are useful
stats.sort_stats(*(sort_order or ('cumulative', 'calls', 'time')))
if callers:
stats.print_callers(.3)
else:
stats.print_stats(.3)
finally:
os.remove(fn)
|
{
"content_hash": "7a0dd3af69952978657a12022a98925a",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 78,
"avg_line_length": 34.142857142857146,
"alnum_prop": 0.596931659693166,
"repo_name": "rfguri/vimfiles",
"id": "27210d40afd3b1dfa55b4aa0c59fb7cbf566d04b",
"size": "1434",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "bundle/ycm/third_party/ycmd/third_party/waitress/waitress/__init__.py",
"mode": "33188",
"license": "mit",
"language": [],
"symlink_target": ""
}
|
import re
import os
import argparse
import subprocess
import threading
import logging
import sys
#Setup arguments
parser = argparse.ArgumentParser(formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog='''Available groups:\n s - System information \n p - Package information \n c - Camera information''')
parser.add_argument('--disable_file_output', action='store_true', help="disable output to file")
parser.add_argument('--file', default="system_info.txt", help="file for output. Default is system_info.txt")
parser.add_argument('--error_stop', action='store_true', help="stop output in case of error")
parser.add_argument('--groups', default="spc", help="Enable output for some group. Default: spc")
#Initialize commands for getting information
commands = [
#System information
{"group":"s", "command":"lscpu", "description":"CPU information"},
{"group":"s", "command":"lsb_release -a", "description":"OS information", "req":"Ubuntu 16", "error":"Ubuntu 16 is required"},
{"group":"s", "command":"uname -a", "description":"Kernel information"},
{"group":"s", "command":"lspci", "description":"PCI devices information"},
{"group":"s", "command":"g++ --version", "description":"g++ version"},
{"group":"s", "command":"echo $PATH", "description":"PATH environment variable"},
{"group":"s", "command":"echo $LD_LIBRARY_PATH", "description":"LD_LIBRARYPATH environment variable"},
{"group":"s", "command":"echo $INCLUDE", "description":"INCLUDE environment variable"},
#Package information
{"group":"p", "command":"dpkg-query -l | grep log4cxx", "description":"Version of log4cxx package"},
{"group":"p", "command":"dpkg-query -l | grep libjpeg", "description":"Version of libjpeg package"},
{"group":"p", "command":"dpkg-query -l | grep opencv-dev", "description":"Version of OpenCV package"},
#Camera information
{"group":"c", "command":"lsusb", "description":"USB devises information"},
{"group":"c", "command":"lsusb -v | grep -E '\<(Bus|iProduct|bDeviceClass|bDeviceProtocol)' 2>/dev/null", "description":"USB devices information"},
{"group":"c", "command":"dmesg | tail -n 50", "description":"dmseg information", "req":"ZR300", "error":"ZR300 camera is absent"},
{"group":"c", "command":"ldconfig -p | grep librealsense", "description":"Accesability of librealsense.so", "req":"librealsense.so", "error":"librealsense.so is absent"},
]
#Class for writing into files and stdout
class output_handler(object):
def __init__(self, file_name):
if file_name is not None:
self.file = open(file_name, 'w')
else:
self.file = None
    def print_line(self, line):
        if self.file is not None:
            self.file.write(line + "\n")
        print(line.rstrip('\n'))
#Class for run command with timeout
class run_command(object):
def __init__(self, cmd, desc, out):
self.cmd = cmd
self.desc = desc
self.out = out
self.process = None
self.std_out = None
self.std_err = None
def run(self, timeout):
def target():
#print 'Thread started. Command: ' + self.cmd
self.process = subprocess.Popen(self.cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
self.std_out, self.std_err = self.process.communicate()
#print 'Thread finished. Command: ' + self.cmd
thread = threading.Thread(target=target)
thread.start()
thread.join(timeout)
if thread.is_alive():
            print('Terminating process. Command: ' + self.cmd)
self.process.terminate()
thread.join()
def print_output(self, output, spaces):
if output is not None:
# get rid of empty lines in output.
#output = '\n'.join([s for s in output.split('\n') if s.strip(' \t\n\r') != ""])
for t in output.split('\n'):
                if t.strip() != "":
self.out.print_line( spaces + t )
def write(self):
self.out.print_line( "\n" + self.desc + ":" )
self.out.print_line( "\tCommand: " + str(self.cmd) )
self.out.print_line( "\tReturn code: " + str(self.process.returncode) )
self.out.print_line( "\tStandard output:" )
self.print_output(self.std_out, "\t\t")
self.out.print_line( "\tStandard error:" )
self.print_output(self.std_err, "\t\t")
def filter(self, req, error):
if(self.std_out.find(req) == -1):
self.out.print_line("\tERROR: " + error)
return 1
return 0
if __name__ == '__main__':
args = parser.parse_args(sys.argv[1:])
if(args.disable_file_output):
out = output_handler(None)
else:
out = output_handler(args.file)
error_numbers = 0
for command in commands:
if(args.groups.find(command["group"]) != -1):
res_command = run_command(command["command"], command["description"], out)
res_command.run(timeout=5)
res_command.write()
            if("req" in command):
error_numbers += res_command.filter(command["req"], command["error"])
if(args.error_stop and error_numbers > 0):
sys.exit(1)
if(error_numbers > 0):
sys.exit(1)
else:
sys.exit(0)
|
{
"content_hash": "28396ed284e2cde36ddc46ade3d1f7f8",
"timestamp": "",
"source": "github",
"line_count": 133,
"max_line_length": 174,
"avg_line_length": 40.62406015037594,
"alnum_prop": 0.6017027577271886,
"repo_name": "IntelRealSense/realsense_sdk",
"id": "87088ed371f8c1aa79b178a01c459b042e5960c3",
"size": "5403",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sdk/src/tools/system_info/system_info.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "1690"
},
{
"name": "C++",
"bytes": "1144840"
},
{
"name": "CMake",
"bytes": "29445"
},
{
"name": "Python",
"bytes": "10128"
}
],
"symlink_target": ""
}
|
import cv2
import numpy as np
import os
###################################################################################################
def main():
capWebcam = cv2.VideoCapture(0) # declare a VideoCapture object and associate to webcam, 0 => use 1st webcam
if capWebcam.isOpened() == False: # check if VideoCapture object was associated to webcam successfully
print "error: capWebcam not accessed successfully\n\n" # if not, print error message to std out
os.system("pause") # pause until user presses a key so user can see error message
return # and exit function (which exits program)
while cv2.waitKey(1) != 27 and capWebcam.isOpened(): # until the Esc key is pressed or webcam connection is lost
blnFrameReadSuccessfully, imgOriginal = capWebcam.read() # read next frame
if not blnFrameReadSuccessfully or imgOriginal is None: # if frame was not read successfully
print "error: frame not read from webcam\n" # print error message to std out
os.system("pause") # pause until user presses a key so user can see error message
break # exit while loop (which exits program)
imgHSV = cv2.cvtColor(imgOriginal, cv2.COLOR_BGR2HSV)
imgThreshLow = cv2.inRange(imgHSV, (0, 155, 155), (18, 255, 255))
imgThreshHigh = cv2.inRange(imgHSV, (165, 155, 155), (179, 255, 255))
imgThresh = cv2.add(imgThreshLow, imgThreshHigh)
imgThresh = cv2.GaussianBlur(imgThresh, (3, 3), 2) # blur
imgThresh = cv2.dilate(imgThresh, np.ones((5,5),np.uint8)) # close image (dilate, then erode)
imgThresh = cv2.erode(imgThresh, np.ones((5,5),np.uint8)) # closing "closes" (i.e. fills in) foreground gaps
intRows, intColumns = imgThresh.shape # break out number of rows and columns in the image, rows is used for minimum distance between circles in call to Hough Circles
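        # cv2.HoughCircles(image, method, dp, minDist): dp=2 accumulates at half
        # the input resolution, and minDist = rows / 4 keeps detected circle
        # centers at least a quarter of the frame height apart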
circles = cv2.HoughCircles(imgThresh, cv2.HOUGH_GRADIENT, 2, intRows / 4) # fill variable circles with all circles in the processed image
if circles is not None: # this line is necessary to keep program from crashing on next line if no circles were found
for circle in circles[0]: # for each circle
x, y, radius = circle # break out x, y, and radius
print "ball position x = " + str(x) + ", y = " + str(y) + ", radius = " + str(radius) # print ball position and radius
                cv2.circle(imgOriginal, (int(x), int(y)), 3, (0, 255, 0), cv2.FILLED)       # draw small green circle at center of detected object
                cv2.circle(imgOriginal, (int(x), int(y)), int(radius), (0, 0, 255), 3)      # draw red circle around the detected object
# end for
# end if
cv2.namedWindow("imgOriginal", cv2.WINDOW_AUTOSIZE) # create windows, use WINDOW_AUTOSIZE for a fixed window size
cv2.namedWindow("imgThresh", cv2.WINDOW_AUTOSIZE) # or use WINDOW_NORMAL to allow window resizing
cv2.imshow("imgOriginal", imgOriginal) # show windows
cv2.imshow("imgThresh", imgThresh)
# end while
cv2.destroyAllWindows() # remove windows from memory
return
###################################################################################################
if __name__ == "__main__":
main()
|
{
"content_hash": "939bfeb8d781cd465f03bae2ba7bbc33",
"timestamp": "",
"source": "github",
"line_count": 61,
"max_line_length": 180,
"avg_line_length": 63.18032786885246,
"alnum_prop": 0.5321743642968345,
"repo_name": "lucasbrsa/OpenCV-3.2",
"id": "2f241f959926e93f7e37b41774326aa40cfe05ef",
"size": "3875",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "projects/gettingStarted/RedBallTracker.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "320592"
},
{
"name": "C#",
"bytes": "12756"
},
{
"name": "C++",
"bytes": "499322"
},
{
"name": "CMake",
"bytes": "244871"
},
{
"name": "Makefile",
"bytes": "344335"
},
{
"name": "Python",
"bytes": "7735"
},
{
"name": "Visual Basic",
"bytes": "13139"
}
],
"symlink_target": ""
}
|
"""
Accessors for related objects.
When a field defines a relation between two models, each model class provides
an attribute to access related instances of the other model class (unless the
reverse accessor has been disabled with related_name='+').
Accessors are implemented as descriptors in order to customize access and
assignment. This module defines the descriptor classes.
Forward accessors follow foreign keys. Reverse accessors trace them back. For
example, with the following models::
class Parent(Model):
pass
class Child(Model):
parent = ForeignKey(Parent, related_name='children')
``child.parent`` is a forward many-to-one relation. ``parent.children`` is a
reverse many-to-one relation.
There are three types of relations (many-to-one, one-to-one, and many-to-many)
and two directions (forward and reverse) for a total of six combinations.
1. Related instance on the forward side of a many-to-one or one-to-one
relation: ``ForwardManyToOneDescriptor``.
Uniqueness of foreign key values is irrelevant to accessing the related
instance, making the many-to-one and one-to-one cases identical as far as
the descriptor is concerned. The constraint is checked upstream (unicity
validation in forms) or downstream (unique indexes in the database).
If you're looking for ``ForwardOneToOneDescriptor``, use
``ForwardManyToOneDescriptor`` instead.
2. Related instance on the reverse side of a one-to-one relation:
``ReverseOneToOneDescriptor``.
One-to-one relations are asymmetrical, despite the apparent symmetry of the
name, because they're implemented in the database with a foreign key from
one table to another. As a consequence ``ReverseOneToOneDescriptor`` is
slightly different from ``ForwardManyToOneDescriptor``.
3. Related objects manager for related instances on the reverse side of a
many-to-one relation: ``ReverseManyToOneDescriptor``.
Unlike the previous two classes, this one provides access to a collection
of objects. It returns a manager rather than an instance.
4. Related objects manager for related instances on the forward or reverse
sides of a many-to-many relation: ``ManyToManyDescriptor``.
Many-to-many relations are symmetrical. The syntax of Django models
requires declaring them on one side but that's an implementation detail.
They could be declared on the other side without any change in behavior.
Therefore the forward and reverse descriptors can be the same.
If you're looking for ``ForwardManyToManyDescriptor`` or
``ReverseManyToManyDescriptor``, use ``ManyToManyDescriptor`` instead.
"""
from __future__ import unicode_literals
import warnings
from operator import attrgetter
from django.db import connections, router, transaction
from django.db.models import Q, signals
from django.db.models.query import QuerySet
from django.utils.deprecation import RemovedInDjango20Warning
from django.utils.functional import cached_property
class ForwardManyToOneDescriptor(object):
"""
Accessor to the related object on the forward side of a many-to-one or
one-to-one relation.
In the example::
class Child(Model):
parent = ForeignKey(Parent, related_name='children')
``child.parent`` is a ``ForwardManyToOneDescriptor`` instance.
"""
def __init__(self, field_with_rel):
self.field = field_with_rel
self.cache_name = self.field.get_cache_name()
@cached_property
def RelatedObjectDoesNotExist(self):
# The exception can't be created at initialization time since the
# related model might not be resolved yet; `rel.model` might still be
# a string model reference.
return type(
str('RelatedObjectDoesNotExist'),
(self.field.remote_field.model.DoesNotExist, AttributeError),
{}
)
def is_cached(self, instance):
return hasattr(instance, self.cache_name)
def get_queryset(self, **hints):
manager = self.field.remote_field.model._default_manager
# If the related manager indicates that it should be used for
# related fields, respect that.
if not getattr(manager, 'use_for_related_fields', False):
manager = self.field.remote_field.model._base_manager
return manager.db_manager(hints=hints).all()
def get_prefetch_queryset(self, instances, queryset=None):
if queryset is None:
queryset = self.get_queryset()
queryset._add_hints(instance=instances[0])
rel_obj_attr = self.field.get_foreign_related_value
instance_attr = self.field.get_local_related_value
instances_dict = {instance_attr(inst): inst for inst in instances}
related_field = self.field.foreign_related_fields[0]
# FIXME: This will need to be revisited when we introduce support for
# composite fields. In the meantime we take this practical approach to
        # solve a regression on 1.6 when the reverse manager is hidden
# (related_name ends with a '+'). Refs #21410.
# The check for len(...) == 1 is a special case that allows the query
# to be join-less and smaller. Refs #21760.
if self.field.remote_field.is_hidden() or len(self.field.foreign_related_fields) == 1:
query = {'%s__in' % related_field.name: set(instance_attr(inst)[0] for inst in instances)}
else:
query = {'%s__in' % self.field.related_query_name(): instances}
queryset = queryset.filter(**query)
# Since we're going to assign directly in the cache,
# we must manage the reverse relation cache manually.
if not self.field.remote_field.multiple:
rel_obj_cache_name = self.field.remote_field.get_cache_name()
for rel_obj in queryset:
instance = instances_dict[rel_obj_attr(rel_obj)]
setattr(rel_obj, rel_obj_cache_name, instance)
return queryset, rel_obj_attr, instance_attr, True, self.cache_name
def __get__(self, instance, cls=None):
"""
Get the related instance through the forward relation.
With the example above, when getting ``child.parent``:
- ``self`` is the descriptor managing the ``parent`` attribute
- ``instance`` is the ``child`` instance
- ``cls`` is the ``Child`` class (we don't need it)
"""
if instance is None:
return self
# The related instance is loaded from the database and then cached in
# the attribute defined in self.cache_name. It can also be pre-cached
# by the reverse accessor (ReverseOneToOneDescriptor).
try:
rel_obj = getattr(instance, self.cache_name)
except AttributeError:
val = self.field.get_local_related_value(instance)
if None in val:
rel_obj = None
else:
qs = self.get_queryset(instance=instance)
qs = qs.filter(self.field.get_reverse_related_filter(instance))
# Assuming the database enforces foreign keys, this won't fail.
rel_obj = qs.get()
# If this is a one-to-one relation, set the reverse accessor
# cache on the related object to the current instance to avoid
# an extra SQL query if it's accessed later on.
if not self.field.remote_field.multiple:
setattr(rel_obj, self.field.remote_field.get_cache_name(), instance)
setattr(instance, self.cache_name, rel_obj)
if rel_obj is None and not self.field.null:
raise self.RelatedObjectDoesNotExist(
"%s has no %s." % (self.field.model.__name__, self.field.name)
)
else:
return rel_obj
def __set__(self, instance, value):
"""
Set the related instance through the forward relation.
With the example above, when setting ``child.parent = parent``:
- ``self`` is the descriptor managing the ``parent`` attribute
- ``instance`` is the ``child`` instance
        - ``value`` is the ``parent`` instance on the right of the equal sign
"""
# An object must be an instance of the related class.
if value is not None and not isinstance(value, self.field.remote_field.model._meta.concrete_model):
raise ValueError(
'Cannot assign "%r": "%s.%s" must be a "%s" instance.' % (
value,
instance._meta.object_name,
self.field.name,
self.field.remote_field.model._meta.object_name,
)
)
elif value is not None:
if instance._state.db is None:
instance._state.db = router.db_for_write(instance.__class__, instance=value)
elif value._state.db is None:
value._state.db = router.db_for_write(value.__class__, instance=instance)
elif value._state.db is not None and instance._state.db is not None:
if not router.allow_relation(value, instance):
raise ValueError('Cannot assign "%r": the current database router prevents this relation.' % value)
# If we're setting the value of a OneToOneField to None, we need to clear
# out the cache on any old related object. Otherwise, deleting the
# previously-related object will also cause this object to be deleted,
# which is wrong.
if value is None:
# Look up the previously-related object, which may still be available
# since we've not yet cleared out the related field.
# Use the cache directly, instead of the accessor; if we haven't
# populated the cache, then we don't care - we're only accessing
# the object to invalidate the accessor cache, so there's no
# need to populate the cache just to expire it again.
related = getattr(instance, self.cache_name, None)
# If we've got an old related object, we need to clear out its
# cache. This cache also might not exist if the related object
# hasn't been accessed yet.
if related is not None:
setattr(related, self.field.remote_field.get_cache_name(), None)
for lh_field, rh_field in self.field.related_fields:
setattr(instance, lh_field.attname, None)
# Set the values of the related field.
else:
for lh_field, rh_field in self.field.related_fields:
setattr(instance, lh_field.attname, getattr(value, rh_field.attname))
# Set the related instance cache used by __get__ to avoid a SQL query
# when accessing the attribute we just set.
setattr(instance, self.cache_name, value)
# If this is a one-to-one relation, set the reverse accessor cache on
# the related object to the current instance to avoid an extra SQL
# query if it's accessed later on.
if value is not None and not self.field.remote_field.multiple:
setattr(value, self.field.remote_field.get_cache_name(), instance)
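# Editorial sketch of the forward descriptor's observable behavior, assuming
# the Parent/Child example above (not part of this module):
#
#   >>> child.parent            # first access: one query, then cached
#   >>> child.parent            # second access: served from the cache
#   >>> child.parent = parent   # sets child.parent_id and primes both caches
#
# Note that assignment never saves; child.save() still persists the FK.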
class ReverseOneToOneDescriptor(object):
"""
Accessor to the related object on the reverse side of a one-to-one
relation.
In the example::
class Restaurant(Model):
place = OneToOneField(Place, related_name='restaurant')
``place.restaurant`` is a ``ReverseOneToOneDescriptor`` instance.
"""
def __init__(self, related):
self.related = related
self.cache_name = related.get_cache_name()
@cached_property
def RelatedObjectDoesNotExist(self):
# The exception isn't created at initialization time for the sake of
# consistency with `ForwardManyToOneDescriptor`.
return type(
str('RelatedObjectDoesNotExist'),
(self.related.related_model.DoesNotExist, AttributeError),
{}
)
def is_cached(self, instance):
return hasattr(instance, self.cache_name)
def get_queryset(self, **hints):
manager = self.related.related_model._default_manager
# If the related manager indicates that it should be used for
# related fields, respect that.
if not getattr(manager, 'use_for_related_fields', False):
manager = self.related.related_model._base_manager
return manager.db_manager(hints=hints).all()
def get_prefetch_queryset(self, instances, queryset=None):
if queryset is None:
queryset = self.get_queryset()
queryset._add_hints(instance=instances[0])
rel_obj_attr = attrgetter(self.related.field.attname)
def instance_attr(obj):
return obj._get_pk_val()
instances_dict = {instance_attr(inst): inst for inst in instances}
query = {'%s__in' % self.related.field.name: instances}
queryset = queryset.filter(**query)
# Since we're going to assign directly in the cache,
# we must manage the reverse relation cache manually.
rel_obj_cache_name = self.related.field.get_cache_name()
for rel_obj in queryset:
instance = instances_dict[rel_obj_attr(rel_obj)]
setattr(rel_obj, rel_obj_cache_name, instance)
return queryset, rel_obj_attr, instance_attr, True, self.cache_name
def __get__(self, instance, cls=None):
"""
Get the related instance through the reverse relation.
With the example above, when getting ``place.restaurant``:
- ``self`` is the descriptor managing the ``restaurant`` attribute
- ``instance`` is the ``place`` instance
        - ``cls`` is the ``Place`` class (we don't need it)
Keep in mind that ``Restaurant`` holds the foreign key to ``Place``.
"""
if instance is None:
return self
# The related instance is loaded from the database and then cached in
# the attribute defined in self.cache_name. It can also be pre-cached
# by the forward accessor (ForwardManyToOneDescriptor).
try:
rel_obj = getattr(instance, self.cache_name)
except AttributeError:
related_pk = instance._get_pk_val()
if related_pk is None:
rel_obj = None
else:
filter_args = self.related.field.get_forward_related_filter(instance)
try:
rel_obj = self.get_queryset(instance=instance).get(**filter_args)
except self.related.related_model.DoesNotExist:
rel_obj = None
else:
# Set the forward accessor cache on the related object to
# the current instance to avoid an extra SQL query if it's
# accessed later on.
setattr(rel_obj, self.related.field.get_cache_name(), instance)
setattr(instance, self.cache_name, rel_obj)
if rel_obj is None:
raise self.RelatedObjectDoesNotExist(
"%s has no %s." % (
instance.__class__.__name__,
self.related.get_accessor_name()
)
)
else:
return rel_obj
def __set__(self, instance, value):
"""
Set the related instance through the reverse relation.
With the example above, when setting ``place.restaurant = restaurant``:
- ``self`` is the descriptor managing the ``restaurant`` attribute
- ``instance`` is the ``place`` instance
        - ``value`` is the ``restaurant`` instance on the right of the equal sign
Keep in mind that ``Restaurant`` holds the foreign key to ``Place``.
"""
# The similarity of the code below to the code in
# ForwardManyToOneDescriptor is annoying, but there's a bunch
# of small differences that would make a common base class convoluted.
if value is None:
# Update the cached related instance (if any) & clear the cache.
try:
rel_obj = getattr(instance, self.cache_name)
except AttributeError:
pass
else:
delattr(instance, self.cache_name)
setattr(rel_obj, self.related.field.name, None)
elif not isinstance(value, self.related.related_model):
# An object must be an instance of the related class.
raise ValueError(
'Cannot assign "%r": "%s.%s" must be a "%s" instance.' % (
value,
instance._meta.object_name,
self.related.get_accessor_name(),
self.related.related_model._meta.object_name,
)
)
else:
if instance._state.db is None:
instance._state.db = router.db_for_write(instance.__class__, instance=value)
elif value._state.db is None:
value._state.db = router.db_for_write(value.__class__, instance=instance)
elif value._state.db is not None and instance._state.db is not None:
if not router.allow_relation(value, instance):
raise ValueError('Cannot assign "%r": the current database router prevents this relation.' % value)
related_pk = tuple(getattr(instance, field.attname) for field in self.related.field.foreign_related_fields)
# Set the value of the related field to the value of the related object's related field
for index, field in enumerate(self.related.field.local_related_fields):
setattr(value, field.attname, related_pk[index])
# Set the related instance cache used by __get__ to avoid a SQL query
# when accessing the attribute we just set.
setattr(instance, self.cache_name, value)
# Set the forward accessor cache on the related object to the current
# instance to avoid an extra SQL query if it's accessed later on.
setattr(value, self.related.field.get_cache_name(), instance)
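# Editorial sketch, assuming the Place/Restaurant example above (not part of
# this module):
#
#   >>> place.restaurant        # one query through the reverse FK, then cached
#   >>> Place().restaurant      # no match -> RelatedObjectDoesNotExist
#   >>> place.restaurant = r    # sets r.place_id in memory; r.save() persists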
class ReverseManyToOneDescriptor(object):
"""
Accessor to the related objects manager on the reverse side of a
many-to-one relation.
In the example::
class Child(Model):
parent = ForeignKey(Parent, related_name='children')
``parent.children`` is a ``ReverseManyToOneDescriptor`` instance.
Most of the implementation is delegated to a dynamically defined manager
    class built by ``create_reverse_many_to_one_manager()`` defined below.
"""
def __init__(self, rel):
self.rel = rel
self.field = rel.field
@cached_property
def related_manager_cls(self):
return create_reverse_many_to_one_manager(
self.rel.related_model._default_manager.__class__,
self.rel,
)
def __get__(self, instance, cls=None):
"""
Get the related objects through the reverse relation.
With the example above, when getting ``parent.children``:
- ``self`` is the descriptor managing the ``children`` attribute
- ``instance`` is the ``parent`` instance
        - ``cls`` is the ``Parent`` class (we don't need it)
"""
if instance is None:
return self
return self.related_manager_cls(instance)
def __set__(self, instance, value):
"""
Set the related objects through the reverse relation.
With the example above, when setting ``parent.children = children``:
- ``self`` is the descriptor managing the ``children`` attribute
- ``instance`` is the ``parent`` instance
        - ``value`` is the ``children`` sequence on the right of the equal sign
"""
warnings.warn(
'Direct assignment to the reverse side of a related set is '
'deprecated due to the implicit save() that happens. Use %s.set() '
'instead.' % self.rel.get_accessor_name(), RemovedInDjango20Warning, stacklevel=2,
)
manager = self.__get__(instance)
manager.set(value)
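# Editorial sketch (not part of this module): direct assignment to the reverse
# side still works here but warns; the supported spelling is the manager API:
#
#   >>> parent.children = [c1, c2]     # RemovedInDjango20Warning
#   >>> parent.children.set([c1, c2])  # preferred, same effect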
def create_reverse_many_to_one_manager(superclass, rel):
"""
Create a manager for the reverse side of a many-to-one relation.
This manager subclasses another manager, generally the default manager of
the related model, and adds behaviors specific to many-to-one relations.
"""
class RelatedManager(superclass):
def __init__(self, instance):
super(RelatedManager, self).__init__()
self.instance = instance
self.model = rel.related_model
self.field = rel.field
self.core_filters = {self.field.name: instance}
def __call__(self, **kwargs):
# We use **kwargs rather than a kwarg argument to enforce the
# `manager='manager_name'` syntax.
manager = getattr(self.model, kwargs.pop('manager'))
manager_class = create_reverse_many_to_one_manager(manager.__class__, rel)
return manager_class(self.instance)
do_not_call_in_templates = True
def _apply_rel_filters(self, queryset):
"""
Filter the queryset for the instance this manager is bound to.
"""
db = self._db or router.db_for_read(self.model, instance=self.instance)
empty_strings_as_null = connections[db].features.interprets_empty_strings_as_nulls
queryset._add_hints(instance=self.instance)
if self._db:
queryset = queryset.using(self._db)
queryset = queryset.filter(**self.core_filters)
for field in self.field.foreign_related_fields:
val = getattr(self.instance, field.attname)
if val is None or (val == '' and empty_strings_as_null):
return queryset.none()
queryset._known_related_objects = {self.field: {self.instance.pk: self.instance}}
return queryset
def get_queryset(self):
try:
return self.instance._prefetched_objects_cache[self.field.related_query_name()]
except (AttributeError, KeyError):
queryset = super(RelatedManager, self).get_queryset()
return self._apply_rel_filters(queryset)
def get_prefetch_queryset(self, instances, queryset=None):
if queryset is None:
queryset = super(RelatedManager, self).get_queryset()
queryset._add_hints(instance=instances[0])
queryset = queryset.using(queryset._db or self._db)
rel_obj_attr = self.field.get_local_related_value
instance_attr = self.field.get_foreign_related_value
instances_dict = {instance_attr(inst): inst for inst in instances}
query = {'%s__in' % self.field.name: instances}
queryset = queryset.filter(**query)
# Since we just bypassed this class' get_queryset(), we must manage
# the reverse relation manually.
for rel_obj in queryset:
instance = instances_dict[rel_obj_attr(rel_obj)]
setattr(rel_obj, self.field.name, instance)
cache_name = self.field.related_query_name()
return queryset, rel_obj_attr, instance_attr, False, cache_name
def add(self, *objs, **kwargs):
bulk = kwargs.pop('bulk', True)
objs = list(objs)
db = router.db_for_write(self.model, instance=self.instance)
def check_and_update_obj(obj):
if not isinstance(obj, self.model):
raise TypeError("'%s' instance expected, got %r" % (
self.model._meta.object_name, obj,
))
setattr(obj, self.field.name, self.instance)
if bulk:
pks = []
for obj in objs:
check_and_update_obj(obj)
if obj._state.adding or obj._state.db != db:
raise ValueError(
"%r instance isn't saved. Use bulk=False or save "
"the object first." % obj
)
pks.append(obj.pk)
self.model._base_manager.using(db).filter(pk__in=pks).update(**{
self.field.name: self.instance,
})
else:
with transaction.atomic(using=db, savepoint=False):
for obj in objs:
check_and_update_obj(obj)
obj.save()
add.alters_data = True
def create(self, **kwargs):
kwargs[self.field.name] = self.instance
db = router.db_for_write(self.model, instance=self.instance)
return super(RelatedManager, self.db_manager(db)).create(**kwargs)
create.alters_data = True
def get_or_create(self, **kwargs):
kwargs[self.field.name] = self.instance
db = router.db_for_write(self.model, instance=self.instance)
return super(RelatedManager, self.db_manager(db)).get_or_create(**kwargs)
get_or_create.alters_data = True
def update_or_create(self, **kwargs):
kwargs[self.field.name] = self.instance
db = router.db_for_write(self.model, instance=self.instance)
return super(RelatedManager, self.db_manager(db)).update_or_create(**kwargs)
update_or_create.alters_data = True
# remove() and clear() are only provided if the ForeignKey can have a value of null.
if rel.field.null:
def remove(self, *objs, **kwargs):
if not objs:
return
bulk = kwargs.pop('bulk', True)
val = self.field.get_foreign_related_value(self.instance)
old_ids = set()
for obj in objs:
# Is obj actually part of this descriptor set?
if self.field.get_local_related_value(obj) == val:
old_ids.add(obj.pk)
else:
raise self.field.remote_field.model.DoesNotExist(
"%r is not related to %r." % (obj, self.instance)
)
self._clear(self.filter(pk__in=old_ids), bulk)
remove.alters_data = True
def clear(self, **kwargs):
bulk = kwargs.pop('bulk', True)
self._clear(self, bulk)
clear.alters_data = True
def _clear(self, queryset, bulk):
db = router.db_for_write(self.model, instance=self.instance)
queryset = queryset.using(db)
if bulk:
# `QuerySet.update()` is intrinsically atomic.
queryset.update(**{self.field.name: None})
else:
with transaction.atomic(using=db, savepoint=False):
for obj in queryset:
setattr(obj, self.field.name, None)
obj.save(update_fields=[self.field.name])
_clear.alters_data = True
def set(self, objs, **kwargs):
# Force evaluation of `objs` in case it's a queryset whose value
# could be affected by `manager.clear()`. Refs #19816.
objs = tuple(objs)
bulk = kwargs.pop('bulk', True)
clear = kwargs.pop('clear', False)
if self.field.null:
db = router.db_for_write(self.model, instance=self.instance)
with transaction.atomic(using=db, savepoint=False):
if clear:
self.clear()
self.add(*objs, bulk=bulk)
else:
old_objs = set(self.using(db).all())
new_objs = []
for obj in objs:
if obj in old_objs:
old_objs.remove(obj)
else:
new_objs.append(obj)
self.remove(*old_objs, bulk=bulk)
self.add(*new_objs, bulk=bulk)
else:
self.add(*objs, bulk=bulk)
set.alters_data = True
return RelatedManager
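# Editorial sketch of the generated RelatedManager, assuming the Parent/Child
# example (field names illustrative; not part of this module):
#
#   >>> parent.children.add(child)           # bulk UPDATE of child.parent_id
#   >>> parent.children.create(name='x')     # INSERT with parent preset
#   >>> parent.children.remove(child)        # defined only if the FK is nullable
#   >>> parent.children.clear()              # likewise nullable-only
#   >>> parent.children(manager='objects')   # opt into a specific manager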
class ManyToManyDescriptor(ReverseManyToOneDescriptor):
"""
Accessor to the related objects manager on the forward and reverse sides of
a many-to-many relation.
In the example::
class Pizza(Model):
toppings = ManyToManyField(Topping, related_name='pizzas')
``pizza.toppings`` and ``topping.pizzas`` are ``ManyToManyDescriptor``
instances.
Most of the implementation is delegated to a dynamically defined manager
class built by ``create_forward_many_to_many_manager()`` defined below.
"""
def __init__(self, rel, reverse=False):
super(ManyToManyDescriptor, self).__init__(rel)
self.reverse = reverse
@property
def through(self):
# through is provided so that you have easy access to the through
# model (Book.authors.through) for inlines, etc. This is done as
# a property to ensure that the fully resolved value is returned.
return self.rel.through
@cached_property
def related_manager_cls(self):
model = self.rel.related_model if self.reverse else self.rel.model
return create_forward_many_to_many_manager(
model._default_manager.__class__,
self.rel,
reverse=self.reverse,
)
def create_forward_many_to_many_manager(superclass, rel, reverse):
"""
    Create a manager for either side of a many-to-many relation.
This manager subclasses another manager, generally the default manager of
the related model, and adds behaviors specific to many-to-many relations.
"""
class ManyRelatedManager(superclass):
def __init__(self, instance=None):
super(ManyRelatedManager, self).__init__()
self.instance = instance
if not reverse:
self.model = rel.model
self.query_field_name = rel.field.related_query_name()
self.prefetch_cache_name = rel.field.name
self.source_field_name = rel.field.m2m_field_name()
self.target_field_name = rel.field.m2m_reverse_field_name()
self.symmetrical = rel.symmetrical
else:
self.model = rel.related_model
self.query_field_name = rel.field.name
self.prefetch_cache_name = rel.field.related_query_name()
self.source_field_name = rel.field.m2m_reverse_field_name()
self.target_field_name = rel.field.m2m_field_name()
self.symmetrical = False
self.through = rel.through
self.reverse = reverse
self.source_field = self.through._meta.get_field(self.source_field_name)
self.target_field = self.through._meta.get_field(self.target_field_name)
self.core_filters = {}
for lh_field, rh_field in self.source_field.related_fields:
core_filter_key = '%s__%s' % (self.query_field_name, rh_field.name)
self.core_filters[core_filter_key] = getattr(instance, rh_field.attname)
self.related_val = self.source_field.get_foreign_related_value(instance)
if None in self.related_val:
raise ValueError('"%r" needs to have a value for field "%s" before '
'this many-to-many relationship can be used.' %
(instance, self.source_field_name))
            # Even if this relation is not to pk, we still require a pk value.
            # The expectation is that the instance has already been saved to
            # the DB, although having a pk value isn't a guarantee of that.
if instance.pk is None:
raise ValueError("%r instance needs to have a primary key value before "
"a many-to-many relationship can be used." %
instance.__class__.__name__)
def __call__(self, **kwargs):
# We use **kwargs rather than a kwarg argument to enforce the
# `manager='manager_name'` syntax.
manager = getattr(self.model, kwargs.pop('manager'))
manager_class = create_forward_many_to_many_manager(manager.__class__, rel, reverse)
return manager_class(instance=self.instance)
do_not_call_in_templates = True
def _build_remove_filters(self, removed_vals):
filters = Q(**{self.source_field_name: self.related_val})
# No need to add a subquery condition if removed_vals is a QuerySet without
# filters.
removed_vals_filters = (not isinstance(removed_vals, QuerySet) or
removed_vals._has_filters())
if removed_vals_filters:
filters &= Q(**{'%s__in' % self.target_field_name: removed_vals})
if self.symmetrical:
symmetrical_filters = Q(**{self.target_field_name: self.related_val})
if removed_vals_filters:
symmetrical_filters &= Q(
**{'%s__in' % self.source_field_name: removed_vals})
filters |= symmetrical_filters
return filters
def _apply_rel_filters(self, queryset):
"""
Filter the queryset for the instance this manager is bound to.
"""
queryset._add_hints(instance=self.instance)
if self._db:
queryset = queryset.using(self._db)
return queryset._next_is_sticky().filter(**self.core_filters)
def get_queryset(self):
try:
return self.instance._prefetched_objects_cache[self.prefetch_cache_name]
except (AttributeError, KeyError):
queryset = super(ManyRelatedManager, self).get_queryset()
return self._apply_rel_filters(queryset)
def get_prefetch_queryset(self, instances, queryset=None):
if queryset is None:
queryset = super(ManyRelatedManager, self).get_queryset()
queryset._add_hints(instance=instances[0])
queryset = queryset.using(queryset._db or self._db)
query = {'%s__in' % self.query_field_name: instances}
queryset = queryset._next_is_sticky().filter(**query)
# M2M: need to annotate the query in order to get the primary model
# that the secondary model was actually related to. We know that
# there will already be a join on the join table, so we can just add
# the select.
            # For non-autocreated 'through' models, we can't assume that we're
            # dealing with PK values.
fk = self.through._meta.get_field(self.source_field_name)
join_table = self.through._meta.db_table
connection = connections[queryset.db]
qn = connection.ops.quote_name
queryset = queryset.extra(select={
'_prefetch_related_val_%s' % f.attname:
'%s.%s' % (qn(join_table), qn(f.column)) for f in fk.local_related_fields})
return (
queryset,
lambda result: tuple(
getattr(result, '_prefetch_related_val_%s' % f.attname)
for f in fk.local_related_fields
),
lambda inst: tuple(
f.get_db_prep_value(getattr(inst, f.attname), connection)
for f in fk.foreign_related_fields
),
False,
self.prefetch_cache_name,
)
def add(self, *objs):
if not rel.through._meta.auto_created:
opts = self.through._meta
raise AttributeError(
"Cannot use add() on a ManyToManyField which specifies an "
"intermediary model. Use %s.%s's Manager instead." %
(opts.app_label, opts.object_name)
)
db = router.db_for_write(self.through, instance=self.instance)
with transaction.atomic(using=db, savepoint=False):
self._add_items(self.source_field_name, self.target_field_name, *objs)
# If this is a symmetrical m2m relation to self, add the mirror entry in the m2m table
if self.symmetrical:
self._add_items(self.target_field_name, self.source_field_name, *objs)
add.alters_data = True
def remove(self, *objs):
if not rel.through._meta.auto_created:
opts = self.through._meta
raise AttributeError(
"Cannot use remove() on a ManyToManyField which specifies "
"an intermediary model. Use %s.%s's Manager instead." %
(opts.app_label, opts.object_name)
)
self._remove_items(self.source_field_name, self.target_field_name, *objs)
remove.alters_data = True
def clear(self):
db = router.db_for_write(self.through, instance=self.instance)
with transaction.atomic(using=db, savepoint=False):
signals.m2m_changed.send(sender=self.through, action="pre_clear",
instance=self.instance, reverse=self.reverse,
model=self.model, pk_set=None, using=db)
filters = self._build_remove_filters(super(ManyRelatedManager, self).get_queryset().using(db))
self.through._default_manager.using(db).filter(filters).delete()
signals.m2m_changed.send(sender=self.through, action="post_clear",
instance=self.instance, reverse=self.reverse,
model=self.model, pk_set=None, using=db)
clear.alters_data = True
def set(self, objs, **kwargs):
if not rel.through._meta.auto_created:
opts = self.through._meta
raise AttributeError(
"Cannot set values on a ManyToManyField which specifies an "
"intermediary model. Use %s.%s's Manager instead." %
(opts.app_label, opts.object_name)
)
# Force evaluation of `objs` in case it's a queryset whose value
# could be affected by `manager.clear()`. Refs #19816.
objs = tuple(objs)
clear = kwargs.pop('clear', False)
db = router.db_for_write(self.through, instance=self.instance)
with transaction.atomic(using=db, savepoint=False):
if clear:
self.clear()
self.add(*objs)
else:
old_ids = set(self.using(db).values_list(self.target_field.target_field.attname, flat=True))
new_objs = []
for obj in objs:
fk_val = (self.target_field.get_foreign_related_value(obj)[0]
if isinstance(obj, self.model) else obj)
if fk_val in old_ids:
old_ids.remove(fk_val)
else:
new_objs.append(obj)
self.remove(*old_ids)
self.add(*new_objs)
set.alters_data = True
def create(self, **kwargs):
# This check needs to be done here, since we can't later remove this
# from the method lookup table, as we do with add and remove.
if not self.through._meta.auto_created:
opts = self.through._meta
raise AttributeError(
"Cannot use create() on a ManyToManyField which specifies "
"an intermediary model. Use %s.%s's Manager instead." %
(opts.app_label, opts.object_name)
)
db = router.db_for_write(self.instance.__class__, instance=self.instance)
new_obj = super(ManyRelatedManager, self.db_manager(db)).create(**kwargs)
self.add(new_obj)
return new_obj
create.alters_data = True
def get_or_create(self, **kwargs):
db = router.db_for_write(self.instance.__class__, instance=self.instance)
obj, created = super(ManyRelatedManager, self.db_manager(db)).get_or_create(**kwargs)
# We only need to add() if created because if we got an object back
# from get() then the relationship already exists.
if created:
self.add(obj)
return obj, created
get_or_create.alters_data = True
def update_or_create(self, **kwargs):
db = router.db_for_write(self.instance.__class__, instance=self.instance)
obj, created = super(ManyRelatedManager, self.db_manager(db)).update_or_create(**kwargs)
# We only need to add() if created because if we got an object back
# from get() then the relationship already exists.
if created:
self.add(obj)
return obj, created
update_or_create.alters_data = True
def _add_items(self, source_field_name, target_field_name, *objs):
# source_field_name: the PK fieldname in join table for the source object
# target_field_name: the PK fieldname in join table for the target object
# *objs - objects to add. Either object instances, or primary keys of object instances.
# If there aren't any objects, there is nothing to do.
from django.db.models import Model
if objs:
new_ids = set()
for obj in objs:
if isinstance(obj, self.model):
if not router.allow_relation(obj, self.instance):
raise ValueError(
'Cannot add "%r": instance is on database "%s", value is on database "%s"' %
(obj, self.instance._state.db, obj._state.db)
)
fk_val = self.through._meta.get_field(
target_field_name).get_foreign_related_value(obj)[0]
if fk_val is None:
raise ValueError(
'Cannot add "%r": the value for field "%s" is None' %
(obj, target_field_name)
)
new_ids.add(fk_val)
elif isinstance(obj, Model):
raise TypeError(
"'%s' instance expected, got %r" %
(self.model._meta.object_name, obj)
)
else:
new_ids.add(obj)
db = router.db_for_write(self.through, instance=self.instance)
vals = (self.through._default_manager.using(db)
.values_list(target_field_name, flat=True)
.filter(**{
source_field_name: self.related_val[0],
'%s__in' % target_field_name: new_ids,
}))
new_ids = new_ids - set(vals)
with transaction.atomic(using=db, savepoint=False):
if self.reverse or source_field_name == self.source_field_name:
# Don't send the signal when we are inserting the
# duplicate data row for symmetrical reverse entries.
signals.m2m_changed.send(sender=self.through, action='pre_add',
instance=self.instance, reverse=self.reverse,
model=self.model, pk_set=new_ids, using=db)
# Add the ones that aren't there already
self.through._default_manager.using(db).bulk_create([
self.through(**{
'%s_id' % source_field_name: self.related_val[0],
'%s_id' % target_field_name: obj_id,
})
for obj_id in new_ids
])
if self.reverse or source_field_name == self.source_field_name:
# Don't send the signal when we are inserting the
# duplicate data row for symmetrical reverse entries.
signals.m2m_changed.send(sender=self.through, action='post_add',
instance=self.instance, reverse=self.reverse,
model=self.model, pk_set=new_ids, using=db)
def _remove_items(self, source_field_name, target_field_name, *objs):
# source_field_name: the PK colname in join table for the source object
# target_field_name: the PK colname in join table for the target object
# *objs - objects to remove
if not objs:
return
# Check that all the objects are of the right type
old_ids = set()
for obj in objs:
if isinstance(obj, self.model):
fk_val = self.target_field.get_foreign_related_value(obj)[0]
old_ids.add(fk_val)
else:
old_ids.add(obj)
db = router.db_for_write(self.through, instance=self.instance)
with transaction.atomic(using=db, savepoint=False):
# Send a signal to the other end if need be.
signals.m2m_changed.send(sender=self.through, action="pre_remove",
instance=self.instance, reverse=self.reverse,
model=self.model, pk_set=old_ids, using=db)
target_model_qs = super(ManyRelatedManager, self).get_queryset()
if target_model_qs._has_filters():
old_vals = target_model_qs.using(db).filter(**{
'%s__in' % self.target_field.target_field.attname: old_ids})
else:
old_vals = old_ids
filters = self._build_remove_filters(old_vals)
self.through._default_manager.using(db).filter(filters).delete()
signals.m2m_changed.send(sender=self.through, action="post_remove",
instance=self.instance, reverse=self.reverse,
model=self.model, pk_set=old_ids, using=db)
return ManyRelatedManager
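# Editorial sketch of the generated ManyRelatedManager, assuming the
# Pizza/Topping example (not part of this module):
#
#   >>> pizza.toppings.add(t1, t2)   # inserts rows into the through table
#   >>> pizza.toppings.remove(t1)    # deletes the matching through rows
#   >>> pizza.toppings.set([t2])     # diffs against current ids, then add/remove
#
# With an explicit `through=` model, add()/remove()/set()/create() raise
# AttributeError; rows must then go through the intermediary model itself.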
|
{
"content_hash": "0aa00b110fc9db2095385eba97a7ee53",
"timestamp": "",
"source": "github",
"line_count": 1056,
"max_line_length": 119,
"avg_line_length": 44.65340909090909,
"alnum_prop": 0.577172668278407,
"repo_name": "rynomster/django",
"id": "b6349e5c495c0d6b93a800bc8964bbf96560f598",
"size": "47154",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "django/db/models/fields/related_descriptors.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "52372"
},
{
"name": "HTML",
"bytes": "170531"
},
{
"name": "JavaScript",
"bytes": "256023"
},
{
"name": "Makefile",
"bytes": "125"
},
{
"name": "Python",
"bytes": "11518250"
},
{
"name": "Shell",
"bytes": "809"
},
{
"name": "Smarty",
"bytes": "130"
}
],
"symlink_target": ""
}
|
import os
from base64 import b64encode
from unittest import mock
import pytest
from airflow.exceptions import AirflowException
from airflow.models import DAG
from airflow.providers.sftp.hooks.sftp import SFTPHook
from airflow.providers.sftp.operators.sftp import SFTPOperation, SFTPOperator
from airflow.providers.ssh.hooks.ssh import SSHHook
from airflow.providers.ssh.operators.ssh import SSHOperator
from airflow.utils import timezone
from airflow.utils.timezone import datetime
from tests.test_utils.config import conf_vars
DEFAULT_DATE = datetime(2017, 1, 1)
TEST_CONN_ID = "conn_id_for_testing"
class TestSFTPOperator:
def setup_method(self):
hook = SSHHook(ssh_conn_id='ssh_default')
hook.no_host_key_check = True
self.hook = hook
self.test_dir = "/tmp"
self.test_local_dir = "/tmp/tmp2"
self.test_remote_dir = "/tmp/tmp1"
self.test_local_filename = 'test_local_file'
self.test_remote_filename = 'test_remote_file'
self.test_local_filepath = f'{self.test_dir}/{self.test_local_filename}'
# Local Filepath with Intermediate Directory
self.test_local_filepath_int_dir = f'{self.test_local_dir}/{self.test_local_filename}'
self.test_remote_filepath = f'{self.test_dir}/{self.test_remote_filename}'
# Remote Filepath with Intermediate Directory
self.test_remote_filepath_int_dir = f'{self.test_remote_dir}/{self.test_remote_filename}'
def teardown_method(self):
if os.path.exists(self.test_local_filepath):
os.remove(self.test_local_filepath)
if os.path.exists(self.test_local_filepath_int_dir):
os.remove(self.test_local_filepath_int_dir)
if os.path.exists(self.test_local_dir):
os.rmdir(self.test_local_dir)
if os.path.exists(self.test_remote_filepath):
os.remove(self.test_remote_filepath)
if os.path.exists(self.test_remote_filepath_int_dir):
os.remove(self.test_remote_filepath_int_dir)
if os.path.exists(self.test_remote_dir):
os.rmdir(self.test_remote_dir)
@conf_vars({('core', 'enable_xcom_pickling'): 'True'})
def test_pickle_file_transfer_put(self, dag_maker):
test_local_file_content = (
b"This is local file content \n which is multiline "
b"continuing....with other character\nanother line here \n this is last line"
)
# create a test file locally
with open(self.test_local_filepath, 'wb') as file:
file.write(test_local_file_content)
with dag_maker(dag_id="unit_tests_sftp_op_pickle_file_transfer_put", start_date=DEFAULT_DATE):
SFTPOperator( # Put test file to remote.
task_id="put_test_task",
ssh_hook=self.hook,
local_filepath=self.test_local_filepath,
remote_filepath=self.test_remote_filepath,
operation=SFTPOperation.PUT,
create_intermediate_dirs=True,
)
SSHOperator( # Check the remote file content.
task_id="check_file_task",
ssh_hook=self.hook,
command=f"cat {self.test_remote_filepath}",
do_xcom_push=True,
)
tis = {ti.task_id: ti for ti in dag_maker.create_dagrun().task_instances}
tis["put_test_task"].run()
tis["check_file_task"].run()
pulled = tis["check_file_task"].xcom_pull(task_ids="check_file_task", key='return_value')
assert pulled.strip() == test_local_file_content
@conf_vars({('core', 'enable_xcom_pickling'): 'True'})
def test_file_transfer_no_intermediate_dir_error_put(self, create_task_instance_of_operator):
test_local_file_content = (
b"This is local file content \n which is multiline "
b"continuing....with other character\nanother line here \n this is last line"
)
# create a test file locally
with open(self.test_local_filepath, 'wb') as file:
file.write(test_local_file_content)
# Try to put test file to remote. This should raise an error with
# "No such file" as the directory does not exist.
ti2 = create_task_instance_of_operator(
SFTPOperator,
dag_id="unit_tests_sftp_op_file_transfer_no_intermediate_dir_error_put",
execution_date=timezone.utcnow(),
task_id="test_sftp",
ssh_hook=self.hook,
local_filepath=self.test_local_filepath,
remote_filepath=self.test_remote_filepath_int_dir,
operation=SFTPOperation.PUT,
create_intermediate_dirs=False,
)
with pytest.raises(Exception) as ctx:
ti2.run()
assert 'No such file' in str(ctx.value)
@conf_vars({('core', 'enable_xcom_pickling'): 'True'})
def test_file_transfer_with_intermediate_dir_put(self, dag_maker):
test_local_file_content = (
b"This is local file content \n which is multiline "
b"continuing....with other character\nanother line here \n this is last line"
)
# create a test file locally
with open(self.test_local_filepath, 'wb') as file:
file.write(test_local_file_content)
with dag_maker(dag_id="unit_tests_sftp_op_file_transfer_with_intermediate_dir_put"):
SFTPOperator( # Put test file to remote.
task_id="test_sftp",
ssh_hook=self.hook,
local_filepath=self.test_local_filepath,
remote_filepath=self.test_remote_filepath_int_dir,
operation=SFTPOperation.PUT,
create_intermediate_dirs=True,
)
SSHOperator( # Check the remote file content.
task_id="test_check_file",
ssh_hook=self.hook,
command=f"cat {self.test_remote_filepath_int_dir}",
do_xcom_push=True,
)
dagrun = dag_maker.create_dagrun(execution_date=timezone.utcnow())
tis = {ti.task_id: ti for ti in dagrun.task_instances}
tis["test_sftp"].run()
tis["test_check_file"].run()
pulled = tis["test_check_file"].xcom_pull(task_ids='test_check_file', key='return_value')
assert pulled.strip() == test_local_file_content
@conf_vars({('core', 'enable_xcom_pickling'): 'False'})
def test_json_file_transfer_put(self, dag_maker):
test_local_file_content = (
b"This is local file content \n which is multiline "
b"continuing....with other character\nanother line here \n this is last line"
)
# create a test file locally
with open(self.test_local_filepath, 'wb') as file:
file.write(test_local_file_content)
with dag_maker(dag_id="unit_tests_sftp_op_json_file_transfer_put"):
SFTPOperator( # Put test file to remote.
task_id="put_test_task",
ssh_hook=self.hook,
local_filepath=self.test_local_filepath,
remote_filepath=self.test_remote_filepath,
operation=SFTPOperation.PUT,
)
SSHOperator( # Check the remote file content.
task_id="check_file_task",
ssh_hook=self.hook,
command=f"cat {self.test_remote_filepath}",
do_xcom_push=True,
)
dagrun = dag_maker.create_dagrun(execution_date=timezone.utcnow())
tis = {ti.task_id: ti for ti in dagrun.task_instances}
tis["put_test_task"].run()
tis["check_file_task"].run()
pulled = tis["check_file_task"].xcom_pull(task_ids="check_file_task", key='return_value')
assert pulled.strip() == b64encode(test_local_file_content).decode('utf-8')
@conf_vars({('core', 'enable_xcom_pickling'): 'True'})
def test_pickle_file_transfer_get(self, dag_maker):
test_remote_file_content = (
"This is remote file content \n which is also multiline "
"another line here \n this is last line. EOF"
)
with dag_maker(dag_id="unit_tests_sftp_op_pickle_file_transfer_get"):
SSHOperator( # Create a test file on remote.
task_id="test_create_file",
ssh_hook=self.hook,
command=f"echo '{test_remote_file_content}' > {self.test_remote_filepath}",
do_xcom_push=True,
)
SFTPOperator( # Get remote file to local.
task_id="test_sftp",
ssh_hook=self.hook,
local_filepath=self.test_local_filepath,
remote_filepath=self.test_remote_filepath,
operation=SFTPOperation.GET,
)
for ti in dag_maker.create_dagrun(execution_date=timezone.utcnow()).task_instances:
ti.run()
# Test the received content.
with open(self.test_local_filepath) as file:
content_received = file.read()
assert content_received.strip() == test_remote_file_content
@conf_vars({('core', 'enable_xcom_pickling'): 'False'})
def test_json_file_transfer_get(self, dag_maker):
test_remote_file_content = (
"This is remote file content \n which is also multiline "
"another line here \n this is last line. EOF"
)
with dag_maker(dag_id="unit_tests_sftp_op_json_file_transfer_get"):
SSHOperator( # Create a test file on remote.
task_id="test_create_file",
ssh_hook=self.hook,
command=f"echo '{test_remote_file_content}' > {self.test_remote_filepath}",
do_xcom_push=True,
)
SFTPOperator( # Get remote file to local.
task_id="test_sftp",
ssh_hook=self.hook,
local_filepath=self.test_local_filepath,
remote_filepath=self.test_remote_filepath,
operation=SFTPOperation.GET,
)
for ti in dag_maker.create_dagrun(execution_date=timezone.utcnow()).task_instances:
ti.run()
# Test the received content.
content_received = None
with open(self.test_local_filepath) as file:
content_received = file.read()
        assert content_received.strip() == test_remote_file_content
@conf_vars({('core', 'enable_xcom_pickling'): 'True'})
def test_file_transfer_no_intermediate_dir_error_get(self, dag_maker):
test_remote_file_content = (
"This is remote file content \n which is also multiline "
"another line here \n this is last line. EOF"
)
with dag_maker(dag_id="unit_tests_sftp_op_file_transfer_no_intermediate_dir_error_get"):
SSHOperator( # Create a test file on remote.
task_id="test_create_file",
ssh_hook=self.hook,
command=f"echo '{test_remote_file_content}' > {self.test_remote_filepath}",
do_xcom_push=True,
)
SFTPOperator( # Try to GET test file from remote.
task_id="test_sftp",
ssh_hook=self.hook,
local_filepath=self.test_local_filepath_int_dir,
remote_filepath=self.test_remote_filepath,
operation=SFTPOperation.GET,
)
ti1, ti2 = dag_maker.create_dagrun(execution_date=timezone.utcnow()).task_instances
ti1.run()
# This should raise an error with "No such file" as the directory
# does not exist.
with pytest.raises(Exception) as ctx:
ti2.run()
assert 'No such file' in str(ctx.value)
@conf_vars({('core', 'enable_xcom_pickling'): 'True'})
def test_file_transfer_with_intermediate_dir_error_get(self, dag_maker):
test_remote_file_content = (
"This is remote file content \n which is also multiline "
"another line here \n this is last line. EOF"
)
with dag_maker(dag_id="unit_tests_sftp_op_file_transfer_with_intermediate_dir_error_get"):
SSHOperator( # Create a test file on remote.
task_id="test_create_file",
ssh_hook=self.hook,
command=f"echo '{test_remote_file_content}' > {self.test_remote_filepath}",
do_xcom_push=True,
)
SFTPOperator( # Get remote file to local.
task_id="test_sftp",
ssh_hook=self.hook,
local_filepath=self.test_local_filepath_int_dir,
remote_filepath=self.test_remote_filepath,
operation=SFTPOperation.GET,
create_intermediate_dirs=True,
)
for ti in dag_maker.create_dagrun(execution_date=timezone.utcnow()).task_instances:
ti.run()
# Test the received content.
content_received = None
with open(self.test_local_filepath_int_dir) as file:
content_received = file.read()
assert content_received.strip() == test_remote_file_content
@mock.patch.dict('os.environ', {'AIRFLOW_CONN_' + TEST_CONN_ID.upper(): "ssh://test_id@localhost"})
def test_arg_checking(self):
dag = DAG(dag_id="unit_tests_sftp_op_arg_checking", default_args={"start_date": DEFAULT_DATE})
# Exception should be raised if neither ssh_hook nor ssh_conn_id is provided
with pytest.raises(AirflowException, match="Cannot operate without sftp_hook or ssh_conn_id."):
task_0 = SFTPOperator(
task_id="test_sftp_0",
local_filepath=self.test_local_filepath,
remote_filepath=self.test_remote_filepath,
operation=SFTPOperation.PUT,
dag=dag,
)
task_0.execute(None)
# if ssh_hook is invalid/not provided, use ssh_conn_id to create SSHHook
task_1 = SFTPOperator(
task_id="test_sftp_1",
ssh_hook="string_rather_than_SSHHook", # type: ignore
ssh_conn_id=TEST_CONN_ID,
local_filepath=self.test_local_filepath,
remote_filepath=self.test_remote_filepath,
operation=SFTPOperation.PUT,
dag=dag,
)
try:
task_1.execute(None)
except Exception:
pass
assert task_1.sftp_hook.ssh_conn_id == TEST_CONN_ID
task_2 = SFTPOperator(
task_id="test_sftp_2",
ssh_conn_id=TEST_CONN_ID, # no ssh_hook provided
local_filepath=self.test_local_filepath,
remote_filepath=self.test_remote_filepath,
operation=SFTPOperation.PUT,
dag=dag,
)
try:
task_2.execute(None)
except Exception:
pass
assert task_2.sftp_hook.ssh_conn_id == TEST_CONN_ID
# if both valid ssh_hook and ssh_conn_id are provided, ignore ssh_conn_id
task_3 = SFTPOperator(
task_id="test_sftp_3",
ssh_hook=self.hook,
ssh_conn_id=TEST_CONN_ID,
local_filepath=self.test_local_filepath,
remote_filepath=self.test_remote_filepath,
operation=SFTPOperation.PUT,
dag=dag,
)
try:
task_3.execute(None)
except Exception:
pass
assert task_3.sftp_hook.ssh_conn_id == self.hook.ssh_conn_id
# Exception should be raised if operation is invalid
with pytest.raises(TypeError, match="Unsupported operation value invalid_operation, "):
task_4 = SFTPOperator(
task_id="test_sftp_4",
local_filepath=self.test_local_filepath,
remote_filepath=self.test_remote_filepath,
operation='invalid_operation',
dag=dag,
)
task_4.execute(None)
# Exception should be raised if both ssh_hook and sftp_hook are provided
with pytest.raises(
AirflowException,
match="Both `ssh_hook` and `sftp_hook` are defined. Please use only one of them.",
):
task_5 = SFTPOperator(
task_id="test_sftp_5",
ssh_hook=self.hook,
sftp_hook=SFTPHook(),
local_filepath=self.test_local_filepath,
remote_filepath=self.test_remote_filepath,
operation=SFTPOperation.PUT,
dag=dag,
)
task_5.execute(None)
task_6 = SFTPOperator(
task_id="test_sftp_6",
ssh_conn_id=TEST_CONN_ID,
remote_host='remotehost',
local_filepath=self.test_local_filepath,
remote_filepath=self.test_remote_filepath,
operation=SFTPOperation.PUT,
dag=dag,
)
try:
task_6.execute(None)
except Exception:
pass
assert task_6.sftp_hook.remote_host == 'remotehost'
|
{
"content_hash": "8cacd1c7a9d7a10fb8cdc0469a63406b",
"timestamp": "",
"source": "github",
"line_count": 404,
"max_line_length": 103,
"avg_line_length": 42.2970297029703,
"alnum_prop": 0.5866690074906367,
"repo_name": "danielvdende/incubator-airflow",
"id": "544b7cc5eacf0f89fc3f28295317912009cee84f",
"size": "17875",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/providers/sftp/operators/test_sftp.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "25785"
},
{
"name": "Dockerfile",
"bytes": "76693"
},
{
"name": "HCL",
"bytes": "3786"
},
{
"name": "HTML",
"bytes": "164512"
},
{
"name": "JavaScript",
"bytes": "236992"
},
{
"name": "Jinja",
"bytes": "37155"
},
{
"name": "Jupyter Notebook",
"bytes": "2929"
},
{
"name": "Mako",
"bytes": "1339"
},
{
"name": "Python",
"bytes": "21824455"
},
{
"name": "R",
"bytes": "313"
},
{
"name": "Shell",
"bytes": "495567"
},
{
"name": "TypeScript",
"bytes": "326556"
}
],
"symlink_target": ""
}
|
from swgpy.object import *
def create(kernel):
result = Tangible()
result.template = "object/tangible/dungeon/corellian_corvette/shared_corvette_search_rebel_destroy_01.iff"
result.attribute_template_id = -1
result.stfName("frn_n","frn_metal_crate")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result
|
{
"content_hash": "32fe522d22c11c2a6a533599bca300c8",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 107,
"avg_line_length": 26.076923076923077,
"alnum_prop": 0.7109144542772862,
"repo_name": "anhstudios/swganh",
"id": "b949aa5f197f9502bfaedb4ac44bfb5438aecf7a",
"size": "484",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "data/scripts/templates/object/tangible/dungeon/corellian_corvette/shared_corvette_search_rebel_destroy_01.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "11887"
},
{
"name": "C",
"bytes": "7699"
},
{
"name": "C++",
"bytes": "2357839"
},
{
"name": "CMake",
"bytes": "41264"
},
{
"name": "PLSQL",
"bytes": "42065"
},
{
"name": "Python",
"bytes": "7503510"
},
{
"name": "SQLPL",
"bytes": "42770"
}
],
"symlink_target": ""
}
|
"""Context for storing options for loading a SavedModel."""
import contextlib
import threading
class LoadContext(threading.local):
"""A context for loading a model."""
def __init__(self):
super(LoadContext, self).__init__()
self._load_options = None
def set_load_options(self, load_options):
self._load_options = load_options
def clear_load_options(self):
self._load_options = None
def load_options(self):
return self._load_options
_load_context = LoadContext()
@contextlib.contextmanager
def load_context(load_options):
_load_context.set_load_options(load_options)
try:
yield
finally:
_load_context.clear_load_options()
def get_load_options():
"""Returns whether under a load context."""
return _load_context.load_options()
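# Editorial sketch (not part of this module), with a stand-in object for the
# real load options; the context simply threads options through to code that
# cannot receive them as arguments:
#
#   options = object()  # stand-in for a tf.saved_model.LoadOptions instance
#   with load_context(options):
#     assert get_load_options() is options
#   assert get_load_options() is None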
|
{
"content_hash": "e989ee31dc5526777332232a77555c01",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 59,
"avg_line_length": 20.68421052631579,
"alnum_prop": 0.6972010178117048,
"repo_name": "sarvex/tensorflow",
"id": "23532eee1e2f58c8f56ce7e7558cca647e7b49b3",
"size": "1475",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "tensorflow/python/keras/saving/saved_model/load_context.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "148184"
},
{
"name": "C++",
"bytes": "6224499"
},
{
"name": "CSS",
"bytes": "107"
},
{
"name": "HTML",
"bytes": "650478"
},
{
"name": "Java",
"bytes": "53519"
},
{
"name": "JavaScript",
"bytes": "6659"
},
{
"name": "Jupyter Notebook",
"bytes": "777935"
},
{
"name": "Objective-C",
"bytes": "1288"
},
{
"name": "Protocol Buffer",
"bytes": "61743"
},
{
"name": "Python",
"bytes": "3474762"
},
{
"name": "Shell",
"bytes": "45640"
},
{
"name": "TypeScript",
"bytes": "283668"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
import ipaddress
import platform
import responses
import pytest
import tempfile
from django.core.exceptions import SuspiciousOperation
from mock import patch
from sentry import http
from sentry.testutils import TestCase
class HttpTest(TestCase):
@responses.activate
@patch('socket.getaddrinfo')
def test_simple(self, mock_getaddrinfo):
mock_getaddrinfo.return_value = [(2, 1, 6, '', ('81.0.0.1', 0))]
responses.add(responses.GET, 'http://example.com', body='foo bar')
resp = http.safe_urlopen('http://example.com')
data = http.safe_urlread(resp)
assert data.decode('utf-8') == 'foo bar'
request = responses.calls[0].request
assert 'User-Agent' in request.headers
assert 'gzip' in request.headers.get('Accept-Encoding', '')
# XXX(dcramer): we can't use responses here as it hooks Session.send
# @responses.activate
def test_ip_blacklist(self):
http.DISALLOWED_IPS = set([
ipaddress.ip_network(u'127.0.0.1'),
ipaddress.ip_network(u'::1'),
ipaddress.ip_network(u'10.0.0.0/8'),
])
with pytest.raises(SuspiciousOperation):
http.safe_urlopen('http://127.0.0.1')
with pytest.raises(SuspiciousOperation):
http.safe_urlopen('http://10.0.0.10')
with pytest.raises(SuspiciousOperation):
# '2130706433' is dword for '127.0.0.1'
http.safe_urlopen('http://2130706433')
with pytest.raises(SuspiciousOperation):
# ipv6
http.safe_urlopen('http://[::1]')
@pytest.mark.skipif(platform.system() == 'Darwin',
reason='macOS is always broken, see comment in sentry/http.py')
def test_garbage_ip(self):
http.DISALLOWED_IPS = set([ipaddress.ip_network(u'127.0.0.1')])
with pytest.raises(SuspiciousOperation):
# '0177.0000.0000.0001' is an octal for '127.0.0.1'
http.safe_urlopen('http://0177.0000.0000.0001')
@responses.activate
def test_fetch_file(self):
responses.add(responses.GET, 'http://example.com', body='foo bar',
content_type='application/json')
temp = tempfile.TemporaryFile()
result = http.fetch_file(
url='http://example.com',
domain_lock_enabled=False,
outfile=temp
)
temp.seek(0)
assert result.body is None
assert temp.read() == 'foo bar'
temp.close()
|
{
"content_hash": "37a0781f65017e42907806b9ad141c67",
"timestamp": "",
"source": "github",
"line_count": 72,
"max_line_length": 87,
"avg_line_length": 35.25,
"alnum_prop": 0.6107171000788022,
"repo_name": "BuildingLink/sentry",
"id": "09c5596614585187c0e8ffba65667fb745070045",
"size": "2538",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tests/sentry/test_http.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "259940"
},
{
"name": "HTML",
"bytes": "297498"
},
{
"name": "JavaScript",
"bytes": "1051088"
},
{
"name": "Lua",
"bytes": "45617"
},
{
"name": "Makefile",
"bytes": "6255"
},
{
"name": "Python",
"bytes": "14120672"
},
{
"name": "Ruby",
"bytes": "4084"
},
{
"name": "Shell",
"bytes": "793"
}
],
"symlink_target": ""
}
|