| text (string, lengths 4–1.02M) | meta (dict) |
|---|---|
__author__ = 'wangzhaoyi'
import MySQLdb
from zhihu import Question
from zhihu import User
from zhihu import Collection
from zhihu import Search
class ConnectItem:
    def __init__(self, user=None, host=None, password=None, dbname=None):
self.user = user
self.host = host
self.password = password
self.dbname = dbname
def getItem(self):
return self.user, self.host, self.password, self.dbname
class PutInDB:
    def __init__(self, connectItem=None, user=None, host=None, password=None, dbname=None):
        if connectItem is not None:
            self.connectItem = connectItem
        else:
            self.connectItem = ConnectItem(user, host, password, dbname)
        # Unpack the connection settings; MySQLdb expects them as keyword arguments,
        # not as a single tuple positional argument.
        user, host, password, dbname = self.connectItem.getItem()
        self.connect = MySQLdb.connect(host=host, user=user, passwd=password, db=dbname, port=3306)
def put_question_in_db(self, question):
cursor = self.connect.cursor()
id = question.get_question_id()
title = question.get_title()
detail = question.get_detail()
answer_num = question.get_answers_num()
followers_num = question.get_followers_num()
topics = question.get_topics()
#answers = question.get_all_answers()
cursor.execute("insert into Questions values(%s, %s, %s, %d, %d)" % id,'00000000' , detail, title, answer_num, followers_num)
for topic in topics:
            topic_id = topic.get_topic_id()
            cursor.execute("insert into Question_Topics values(%s, %s)", (topic_id, id))
cursor.close()
self.connect.commit()
def put_answer_in_db(self, answer):
cursor = self.connect.cursor()
id = answer.get_answer_id()
question = answer.get_question()
question_id = question.get_question_id()
author = answer.get_author()
author_id = author.get_user_id()
detail = answer.get_content()
upvote = answer.get_upvote()
visit_times = answer.get_visit_times()
cursor.execute("insert into answers values(%s, %s, %s, %s, %d, %d)" % id, question_id, author_id, detail, upvote,visit_times)
cursor.close()
self.connect.commit()
def put_user_in_db(self, user):
cursor = self.connect.cursor()
user_id = user.get_user_id()
follower_num = user.get_followers_num()
followee_num = user.get_followees_num()
agree_num = user.get_agree_num()
thanks_num = user.get_thanks_num()
ask_num = user.get_asks_num()
answer_num = user.get_answers_num()
collection_num = user.get_collections_num()
cursor.execute("insert into users values(%s, %d, %d, %d, %d, %d, %d)" % user_id, follower_num, followee_num, agree_num, thanks_num, ask_num, answer_num,collection_num)
# for the Tables besides User
asks = user.get_asks()
followers = user.get_followers()
collections = user.get_collections()
        for question in asks:
            question_id = question.get_question_id()
            cursor.execute("update questions set asker_ID=%s where question_id=%s",
                           (user_id, question_id))
        for follower in followers:
            follower_id = follower.get_user_id()
            cursor.execute("insert into follow_user values(%s, %s)", (follower_id, user_id))
        for collection in collections:
            collection_name = collection.get_name()
            cursor.execute("insert into collection values(%s, %s)", (collection_name, user_id))
cursor.close()
self.connect.commit()
def put_collection_in_db(self, collection):
cursor = self.connect.cursor()
name = collection.get_name()
answers = collection.get_all_answers()
for answer in answers:
answer_id = answer.get_answer_id()
cursor.execute("insert into Collection_Answers values(%s, %s)" % name, answer_id)
cursor.close()
self.connect.commit()
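# Illustrative usage sketch (the credentials and question URL below are
# hypothetical; the zhihu module must also be configured with valid login
# cookies before Question pages can be fetched):
#
#   db = PutInDB(user='root', host='localhost', password='secret', dbname='zhihu')
#   q = Question('http://www.zhihu.com/question/24825703')
#   db.put_question_in_db(q)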
|
{
"content_hash": "382a92c853334c57cd274024a6c1affb",
"timestamp": "",
"source": "github",
"line_count": 96,
"max_line_length": 175,
"avg_line_length": 40.697916666666664,
"alnum_prop": 0.6160737138469414,
"repo_name": "Ulden/WormForZhihu",
"id": "fd984d5f1dd8c00c1e40b855929baabcf7886e6b",
"size": "3907",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "worm.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "61672"
}
],
"symlink_target": ""
}
|
import os
import testtools
from neutron.common import utils
from neutron.tests.common.exclusive_resources import resource_allocator
from neutron.tests.functional import base
def safe_remove_file(file_path):
try:
os.remove(file_path)
except OSError:
pass
class TestResourceAllocator(base.BaseLoggingTestCase):
def setUp(self):
super(TestResourceAllocator, self).setUp()
self.ra = resource_allocator.ResourceAllocator(
utils.get_random_string(6), lambda: 42)
self.addCleanup(safe_remove_file, self.ra._state_file_path)
def test_allocate_and_release(self):
# Assert that we can allocate a resource
resource = self.ra.allocate()
self.assertEqual('42', resource)
# Assert that we cannot allocate any more resources, since we're
# using an allocator that always returns the same value
with testtools.ExpectedException(ValueError):
self.ra.allocate()
# Assert that releasing the resource and allocating again works
self.ra.release(resource)
resource = self.ra.allocate()
self.assertEqual('42', resource)
def test_file_manipulation(self):
# The file should not be created until the first allocation
self.assertFalse(os.path.exists(self.ra._state_file_path))
resource = self.ra.allocate()
self.assertTrue(os.path.exists(self.ra._state_file_path))
# Releasing the last resource should delete the file
self.ra.release(resource)
self.assertFalse(os.path.exists(self.ra._state_file_path))
|
{
"content_hash": "95eb32fc162103728cc428433d0e72aa",
"timestamp": "",
"source": "github",
"line_count": 47,
"max_line_length": 72,
"avg_line_length": 34.191489361702125,
"alnum_prop": 0.6845052893590542,
"repo_name": "cloudbase/neutron",
"id": "eea791b08a8de5c848a68827a606867411f20522",
"size": "2213",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "neutron/tests/functional/tests/common/exclusive_resources/test_resource_allocator.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "1047"
},
{
"name": "Python",
"bytes": "9942988"
},
{
"name": "Shell",
"bytes": "14325"
}
],
"symlink_target": ""
}
|
import sys
class Genotype(object):
'''
This class stores information about each sample.
'''
def __init__(self, variant, value_list):
'''
Initialize the class. All instances have a GT field,
but that is enforced in the Variant class.
'''
self.value_list = value_list
self.variant = variant
def __eq__(self, other):
return self.get_gt_string() == other.get_gt_string()
def set_format(self, field, value):
'''
Set information for an individual format field.
'''
if field in self.variant.format_set:
if field in self.variant.format_dict:
i = self.variant.format_dict[field]
self._set_value(i, value)
else:
num_fields = len(self.variant.format_dict)
self.variant.format_dict[field] = num_fields
self._set_value(num_fields, value)
else:
sys.stderr.write('\nError: invalid FORMAT field, \"' + field + '\"\n')
sys.exit(1)
def _set_value(self, index, value):
try:
self.value_list[index] = value
except IndexError:
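            # The index is past the end of the list: pad the gap with '.'
            # placeholders so the new value lands at the requested position.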
self.value_list.extend(['.'] * (index - len(self.value_list)))
self.value_list.append(value)
def get_format(self, field):
'''
Get value of particular field key
'''
try:
return self.value_list[self.variant.format_dict[field]]
except IndexError:
if field != 'GT':
return '.'
else:
return './.'
def get_gt_string(self):
'''
Convert object back to string.
        If some values are missing (at the end, for example) they are printed
        as '.', since all format fields present in any Genotype instance in
        the Variant line are tracked.
'''
g_list = list()
for f in self.variant.format_list:
if f.id in self.variant.format_dict:
value = self.get_format(f.id)
if type(value) == float:
g_list.append('%0.2f' % value)
else:
g_list.append(str(value))
return ':'.join(g_list)
|
{
"content_hash": "6d19b81eeb1d19aad3566f52e690aa20",
"timestamp": "",
"source": "github",
"line_count": 70,
"max_line_length": 83,
"avg_line_length": 32.15714285714286,
"alnum_prop": 0.5264326965792981,
"repo_name": "hall-lab/svtools",
"id": "c2d2f0a51bf130e755ff8fc088828e750564e768",
"size": "2251",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "svtools/vcf/genotype.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Perl",
"bytes": "2266"
},
{
"name": "Python",
"bytes": "468744"
},
{
"name": "Shell",
"bytes": "4721"
}
],
"symlink_target": ""
}
|
import os
import unittest
from ocremix import OCRemixDownloader
class TestOCRemixDownloader(unittest.TestCase):
def setUp(self):
self.testee = OCRemixDownloader()
self.source_url = "http://ocremix.org/remix/OCR03357"
def test_get_download_link_from_page(self):
actual = self.testee.get_download_link_from_page(self.source_url)
assert "http://ocr.blueblue.fr/files/music/remixes/Final_Fantasy_9_The_Journey_Home_OC_ReMix.mp3" == actual
def test_get_md5_sum_from_page(self):
actual = self.testee.get_md5_sum_from_page(self.source_url)
assert "fd95488024ec07b484eb597f9e133298" == actual
def test_get_md5_from_file(self):
actual = self.testee.get_md5_from_file("/home/yur763/media/music/OCREMIX/singles/Final_Fantasy_9_The_Journey_Home_OC_ReMix.mp3")
assert "fd95488024ec07b484eb597f9e133298" == actual
def test_download_and_write_file(self):
mp3_url = self.testee.get_download_link_from_page(self.source_url)
target_file = self.testee.download_and_write_file(mp3_url, "./")
wanted = self.testee.get_md5_sum_from_page(self.source_url)
actual = self.testee.get_md5_from_file(target_file.name)
os.system("rm -f %s" % target_file.name)
assert actual == wanted
|
{
"content_hash": "bd7d9901b02d39dc7b9a33a162b97bca",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 136,
"avg_line_length": 39.05882352941177,
"alnum_prop": 0.697289156626506,
"repo_name": "filiphe/OCRemix-Downloader",
"id": "9338bba65b5c349601215957c79eee5a170bed14",
"size": "1352",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_ocremix.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "3032"
}
],
"symlink_target": ""
}
|
from __future__ import print_function
import os
import sys
import numpy as np
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.utils import to_categorical
from keras.layers import Dense, Input, Flatten
from keras.layers import Conv1D, MaxPooling1D, Embedding
from keras.models import Model
import pandas as pd
from keras import backend as K
K.set_image_dim_ordering('tf')
import re
from nltk.corpus import wordnet
from nltk.corpus import stopwords
from nltk import word_tokenize
import string
import pickle
from keras.layers import Dropout
BASE_DIR = '../'
GLOVE_DIR = BASE_DIR + 'data/glove.6B'
MAX_SEQUENCE_LENGTH = 1000
MAX_NB_WORDS = 20000
EMBEDDING_DIM = 100
VALIDATION_SPLIT = 0.5
data = '/Users/Teban1503/Documents/aihack/data/Reto-Damappa-Dataset/Hackathon_Dataset/Text Datasets/ori_data/all_data.csv'
training_data = '/Users/Teban1503/Documents/aihack/data/Reto-Damappa-Dataset/Hackathon_Dataset/Text Datasets/ori_data/train.csv'
#Index of words
embeddings_index = {}
f = open(os.path.join(GLOVE_DIR, 'glove.6B.100d.txt'))
for line in f:
values = line.split()
#print('values : ', len(values))
word = values[0]
coefs = np.asarray(values[1:], dtype='float32')
embeddings_index[word] = coefs
f.close()
#Preprocess and clean data
# second, prepare text samples and their labels
texts = [] # list of text samples
labels_index = {0:'hate', 1:'offensive', 2:'neither'} # original id -> label name mapping (before binarizing)
labels_bin = {0:'negative',1:'positive'}
labels = [] # list of label ids
stop = set(stopwords.words('english'))
augmentinfo = []
augmentlabel = []
with open(data) as infile:
    for item in infile:
        fields = item.split('\t')  # use a new name so the csv path in `data` is not clobbered
        label_ = fields[1]
        text_ = fields[2].strip()  # drop surrounding whitespace
        text_ = text_.lower()
#print('BEFORE TEXT: ', text_)
space_pattern = '\s+'
giant_url_regex = ('http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+')
mention_regex = '@[\w\-]+'
hashtag_regex = '#[\w\-]+'
parsed_text = re.sub(space_pattern, ' ', text_)
parsed_text = re.sub(giant_url_regex, '', parsed_text)
parsed_text = re.sub(mention_regex, '', parsed_text)
parsed_text = re.sub("&.*?;",'',parsed_text)
parsed_text = re.sub("(\?)+",'?',parsed_text)
parsed_text = re.sub("(\!)+",'!',parsed_text)
parsed_text = re.sub("(\.)+",'.',parsed_text)
parsed_text = re.sub("(\,)+",'.',parsed_text)
parsed_text = re.sub("rt\s:",'',parsed_text)
parsed_text = re.sub(hashtag_regex,'',parsed_text)
parsed_text = " ".join(parsed_text.split())
#print('AFTER TEXT: ', parsed_text)
#tokens = word_tokenize(parsed_text)
#print('TOKENS: ', tokens)
texts.append(parsed_text)
if int(label_)<2:
labels.append('0')
else:
labels.append('1')
#texto = [i for i in word_tokenize(text_.lower()) if i not in stop]
#for ele in texto:
# print('%%%')
# print (ele)
# print('$$$')
# test = wordnet.synsets(ele)
# print (test)
# print('&&&')
#texto = " ".join(str(x) for x in texto)
#texto = texto.translate(None, string.punctuation).strip()
# finally, vectorize the text samples
tokenizer = Tokenizer(num_words=MAX_NB_WORDS)
tokenizer.fit_on_texts(texts)
sequences = tokenizer.texts_to_sequences(texts)
print(texts[0])
print(sequences[0])
print(tokenizer.word_index['woman'])
print(embeddings_index['woman'])
word_index = tokenizer.word_index
data = pad_sequences(sequences, maxlen=MAX_SEQUENCE_LENGTH)
labels = to_categorical(np.asarray(labels))
indices = np.arange(data.shape[0])
np.random.shuffle(indices)
data = data[indices]
labels = labels[indices]
validation_samples = int(VALIDATION_SPLIT * data.shape[0])
x_train = data[:-validation_samples]
y_train = labels[:-validation_samples]
x_val = data[-validation_samples:]
y_val = labels[-validation_samples:]
# prepare the embedding matrix: row i holds the GloVe vector for the word whose
# tokenizer index is i (words without a GloVe entry stay all-zeros)
num_words = min(MAX_NB_WORDS, len(word_index))
embedding_matrix = np.zeros((len(word_index) + 1, EMBEDDING_DIM))
for word, i in word_index.items():
if i >= MAX_NB_WORDS:
continue
embedding_vector = embeddings_index.get(word)
if embedding_vector is not None:
# words not found in embedding index will be all-zeros.
embedding_matrix[i] = embedding_vector
print(embedding_matrix[385])
print('------------------')
#print(word_index)
embedding_layer = Embedding(len(word_index) + 1,
EMBEDDING_DIM,
weights=[embedding_matrix],
input_length=MAX_SEQUENCE_LENGTH,
trainable=False)
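# trainable=False keeps the pre-trained GloVe vectors frozen, so only the
# convolutional and dense layers below learn weights during training.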
# Important: save the tokenizer so it can be reloaded later to preprocess text for prediction
with open("debug_nabetse_default_tutorial_tokenizer_5.pickle", "wb") as f:
pickle.dump(tokenizer, f)
print('Training model.')
# train a 1D convnet with global maxpooling
sequence_input = Input(shape=(MAX_SEQUENCE_LENGTH,), dtype='int32')
embedded_sequences = embedding_layer(sequence_input)
print('embedded_sequences:', embedded_sequences)
x = Conv1D(128, 5, activation='relu')(embedded_sequences)
x = MaxPooling1D(5)(x)
x = Conv1D(128, 5, activation='relu')(x)
x = MaxPooling1D(5)(x)
x = Conv1D(128, 5, activation='relu')(x)
x = MaxPooling1D(35)(x)
x = Flatten()(x)
x = Dense(128, activation='relu')(x)
preds = Dense(len(labels_bin), activation='softmax')(x)
model = Model(sequence_input, preds)
model.compile(loss='categorical_crossentropy',
optimizer='rmsprop',
metrics=['acc'])
model.fit(x_train, y_train,
batch_size=128,
epochs=10,
validation_data=(x_val, y_val))
model.save('debug_nabetse_default_tutorial_5.h5')
|
{
"content_hash": "bd95e072f39df16dcfb92d78068c9d44",
"timestamp": "",
"source": "github",
"line_count": 190,
"max_line_length": 128,
"avg_line_length": 30.178947368421053,
"alnum_prop": 0.665852807813045,
"repo_name": "jasanmiguel10/HackEvent",
"id": "9e429c0d834f01e773a22a484ef65c730d926004",
"size": "5734",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/model_ori.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Java",
"bytes": "5638"
},
{
"name": "Python",
"bytes": "27849"
}
],
"symlink_target": ""
}
|
import json
import logging
import re
import sys
import time
from django import forms
from django.contrib import messages
from django.contrib.auth.models import User
from django.db.models import Q
from django.http import HttpResponse, QueryDict
from django.shortcuts import redirect
from django.utils.html import escape
from django.utils.translation import ugettext as _
from django.core.urlresolvers import reverse
from desktop.appmanager import get_apps_dict
from desktop.conf import REDIRECT_WHITELIST
from desktop.context_processors import get_app_name
from desktop.lib.paginator import Paginator
from desktop.lib.django_util import JsonResponse
from desktop.lib.django_util import copy_query_dict, format_preserving_redirect, render
from desktop.lib.django_util import login_notrequired, get_desktop_uri_prefix
from desktop.lib.exceptions_renderable import PopupException
from desktop.models import Document
from desktop.lib.parameterization import find_variables
from notebook.models import escape_rows
import beeswax.forms
import beeswax.design
import beeswax.management.commands.beeswax_install_examples
from beeswax import common, data_export, models
from beeswax.models import QueryHistory, SavedQuery, Session
from beeswax.server import dbms
from beeswax.server.dbms import expand_exception, get_query_server_config, QueryServerException
LOG = logging.getLogger(__name__)
# For scraping Job IDs from logs
HADOOP_JOBS_RE = re.compile("Starting Job = ([a-z0-9_]+?),")
SPARK_APPLICATION_RE = re.compile("Running with YARN Application = (?P<application_id>application_\d+_\d+)")
TEZ_APPLICATION_RE = re.compile("Executing on YARN cluster with App id ([a-z0-9_]+?)\)")
def index(request):
return execute_query(request)
"""
Design views
"""
def save_design(request, form, type_, design, explicit_save):
"""
save_design(request, form, type_, design, explicit_save) -> SavedQuery
A helper method to save the design:
* If ``explicit_save``, then we save the data in the current design.
* If the user clicked the submit button, we do NOT overwrite the current
design. Instead, we create a new "auto" design (iff the user modified
the data). This new design is named after the current design, with the
AUTO_DESIGN_SUFFIX to signify that it's different.
Need to return a SavedQuery because we may end up with a different one.
Assumes that form.saveform is the SaveForm, and that it is valid.
"""
authorized_get_design(request, design.id)
assert form.saveform.is_valid()
sub_design_form = form # Beeswax/Impala case
if type_ == models.HQL:
design_cls = beeswax.design.HQLdesign
elif type_ == models.IMPALA:
design_cls = beeswax.design.HQLdesign
elif type_ == models.SPARK:
from spark.design import SparkDesign
design_cls = SparkDesign
sub_design_form = form.query
else:
raise ValueError(_('Invalid design type %(type)s') % {'type': type_})
design_obj = design_cls(sub_design_form, query_type=type_)
name = form.saveform.cleaned_data['name']
desc = form.saveform.cleaned_data['desc']
return _save_design(request.user, design, type_, design_obj, explicit_save, name, desc)
def _save_design(user, design, type_, design_obj, explicit_save, name=None, desc=None):
# Design here means SavedQuery
old_design = design
new_data = design_obj.dumps()
# Auto save if (1) the user didn't click "save", and (2) the data is different.
# Create an history design if the user is executing a shared design.
# Don't generate an auto-saved design if the user didn't change anything.
if explicit_save and (not design.doc.exists() or design.doc.get().can_write_or_exception(user)):
design.name = name
design.desc = desc
design.is_auto = False
elif design_obj != old_design.get_design():
# Auto save iff the data is different
if old_design.id is not None:
# Clone iff the parent design isn't a new unsaved model
design = old_design.clone(new_owner=user)
if not old_design.is_auto:
design.name = old_design.name + models.SavedQuery.AUTO_DESIGN_SUFFIX
else:
design.name = models.SavedQuery.DEFAULT_NEW_DESIGN_NAME
design.is_auto = True
design.name = design.name[:64]
design.type = type_
design.data = new_data
design.save()
LOG.info('Saved %s design "%s" (id %s) for %s' % (explicit_save and '' or 'auto ', design.name, design.id, design.owner))
if design.doc.exists():
design.doc.update(name=design.name, description=design.desc)
else:
Document.objects.link(design, owner=design.owner, extra=design.type, name=design.name, description=design.desc)
if design.is_auto:
design.doc.get().add_to_history()
return design
def delete_design(request):
if request.method == 'POST':
ids = request.POST.getlist('designs_selection')
designs = dict([(design_id, authorized_get_design(request, design_id, owner_only=True)) for design_id in ids])
if None in designs.values():
LOG.error('Cannot delete non-existent design(s) %s' % ','.join([key for key, name in designs.items() if name is None]))
return list_designs(request)
for design in designs.values():
if request.POST.get('skipTrash', 'false') == 'false':
design.doc.get().send_to_trash()
else:
design.doc.all().delete()
design.delete()
return redirect(reverse(get_app_name(request) + ':list_designs'))
else:
return render('confirm.mako', request, {'url': request.path, 'title': _('Delete design(s)?')})
def restore_design(request):
if request.method == 'POST':
ids = request.POST.getlist('designs_selection')
designs = dict([(design_id, authorized_get_design(request, design_id)) for design_id in ids])
if None in designs.values():
LOG.error('Cannot restore non-existent design(s) %s' % ','.join([key for key, name in designs.items() if name is None]))
return list_designs(request)
for design in designs.values():
design.doc.get().restore_from_trash()
return redirect(reverse(get_app_name(request) + ':list_designs'))
else:
return render('confirm.mako', request, {'url': request.path, 'title': _('Restore design(s)?')})
def clone_design(request, design_id):
"""Clone a design belonging to any user"""
design = authorized_get_design(request, design_id)
if design is None:
LOG.error('Cannot clone non-existent design %s' % (design_id,))
return list_designs(request)
copy = design.clone(request.user)
copy.save()
name = copy.name + '-copy'
design.doc.get().copy(content_object=copy, name=name, owner=request.user)
messages.info(request, _('Copied design: %(name)s') % {'name': design.name})
return format_preserving_redirect(request, reverse(get_app_name(request) + ':execute_design', kwargs={'design_id': copy.id}))
def list_designs(request):
"""
View function for show all saved queries.
We get here from /beeswax/list_designs?filterargs, with the options being:
page=<n> - Controls pagination. Defaults to 1.
user=<name> - Show design items belonging to a user. Default to all users.
type=<type> - <type> is "hql", for saved query type. Default to show all.
sort=<key> - Sort by the attribute <key>, which is one of:
"date", "name", "desc", and "type" (design type)
Accepts the form "-date", which sort in descending order.
Default to "-date".
text=<frag> - Search for fragment "frag" in names and descriptions.
"""
DEFAULT_PAGE_SIZE = 20
app_name = get_app_name(request)
# Extract the saved query list.
prefix = 'q-'
querydict_query = _copy_prefix(prefix, request.GET)
  # Manually constrain the design type to this app.
querydict_query[ prefix + 'type' ] = app_name
# Get search filter input if any
search_filter = request.GET.get('text', None)
if search_filter is not None:
querydict_query[ prefix + 'text' ] = search_filter
page, filter_params = _list_designs(request.user, querydict_query, DEFAULT_PAGE_SIZE, prefix)
return render('list_designs.mako', request, {
'page': page,
'filter_params': filter_params,
'prefix': prefix,
'user': request.user,
'designs_json': json.dumps([query.id for query in page.object_list])
})
def list_trashed_designs(request):
DEFAULT_PAGE_SIZE = 20
app_name= get_app_name(request)
user = request.user
# Extract the saved query list.
prefix = 'q-'
querydict_query = _copy_prefix(prefix, request.GET)
  # Manually constrain the design type to this app.
querydict_query[ prefix + 'type' ] = app_name
# Get search filter input if any
search_filter = request.GET.get('text', None)
if search_filter is not None:
querydict_query[ prefix + 'text' ] = search_filter
page, filter_params = _list_designs(user, querydict_query, DEFAULT_PAGE_SIZE, prefix, is_trashed=True)
return render('list_trashed_designs.mako', request, {
'page': page,
'filter_params': filter_params,
'prefix': prefix,
'user': request.user,
'designs_json': json.dumps([query.id for query in page.object_list])
})
def my_queries(request):
"""
View a mix of history and saved queries.
It understands all the GET params in ``list_query_history`` (with a ``h-`` prefix)
and those in ``list_designs`` (with a ``q-`` prefix). The only thing it disallows
is the ``user`` filter, since this view only shows what belongs to the user.
"""
DEFAULT_PAGE_SIZE = 30
app_name= get_app_name(request)
# Extract the history list.
prefix = 'h-'
querydict_history = _copy_prefix(prefix, request.GET)
  # Manually constrain the user and type filters.
querydict_history[ prefix + 'user' ] = request.user
querydict_history[ prefix + 'type' ] = app_name
hist_page, hist_filter = _list_query_history(request.user,
querydict_history,
DEFAULT_PAGE_SIZE,
prefix)
# Extract the saved query list.
prefix = 'q-'
querydict_query = _copy_prefix(prefix, request.GET)
  # Manually constrain the user and type filters.
querydict_query[ prefix + 'user' ] = request.user
querydict_query[ prefix + 'type' ] = app_name
query_page, query_filter = _list_designs(request.user, querydict_query, DEFAULT_PAGE_SIZE, prefix)
filter_params = hist_filter
filter_params.update(query_filter)
return render('my_queries.mako', request, {
'request': request,
'h_page': hist_page,
'q_page': query_page,
'filter_params': filter_params,
'designs_json': json.dumps([query.id for query in query_page.object_list])
})
def list_query_history(request):
"""
View the history of query (for the current user).
We get here from /beeswax/query_history?filterargs, with the options being:
page=<n> - Controls pagination. Defaults to 1.
user=<name> - Show history items from a user. Default to current user only.
Also accepts ':all' to show all history items.
type=<type> - <type> is "beeswax|impala", for design type. Default to show all.
design_id=<id> - Show history for this particular design id.
sort=<key> - Sort by the attribute <key>, which is one of:
"date", "state", "name" (design name), and "type" (design type)
Accepts the form "-date", which sort in descending order.
Default to "-date".
auto_query=<bool> - Show auto generated actions (drop table, read data, etc). Default True
"""
DEFAULT_PAGE_SIZE = 100
prefix = 'q-'
share_queries = request.user.is_superuser
querydict_query = request.GET.copy()
if not share_queries:
querydict_query[prefix + 'user'] = request.user.username
app_name = get_app_name(request)
querydict_query[prefix + 'type'] = app_name
page, filter_params = _list_query_history(request.user, querydict_query, DEFAULT_PAGE_SIZE, prefix)
  filter = request.GET.get(prefix + 'search') or ''
if request.GET.get('format') == 'json':
resp = {
'queries': [massage_query_history_for_json(app_name, query_history) for query_history in page.object_list]
}
return JsonResponse(resp)
return render('list_history.mako', request, {
'request': request,
'page': page,
'filter_params': filter_params,
'share_queries': share_queries,
'prefix': prefix,
'filter': filter,
})
def massage_query_history_for_json(app_name, query_history):
return {
'id': query_history.id,
'design_id': query_history.design.id,
'query': escape(query_history.query),
'timeInMs': time.mktime(query_history.submission_date.timetuple()),
'timeFormatted': query_history.submission_date.strftime("%x %X"),
'designUrl': reverse(app_name + ':execute_design', kwargs={'design_id': query_history.design.id}),
'resultsUrl': not query_history.is_failure() and reverse(app_name + ':watch_query_history', kwargs={'query_history_id': query_history.id}) or ""
}
def download(request, id, format):
try:
query_history = authorized_get_query_history(request, id, must_exist=True)
db = dbms.get(request.user, query_history.get_query_server_config())
LOG.debug('Download results for query %s: [ %s ]' % (query_history.server_id, query_history.query))
return data_export.download(query_history.get_handle(), format, db)
except Exception, e:
if not hasattr(e, 'message') or not e.message:
message = e
else:
message = e.message
raise PopupException(message, detail='')
"""
Queries Views
"""
def execute_query(request, design_id=None, query_history_id=None):
"""
View function for executing an arbitrary query.
"""
action = 'query'
if query_history_id:
query_history = authorized_get_query_history(request, query_history_id, must_exist=True)
design = query_history.design
try:
if query_history.server_id and query_history.server_guid:
handle, state = _get_query_handle_and_state(query_history)
if 'on_success_url' in request.GET:
if request.GET.get('on_success_url') and any([regexp.match(request.GET.get('on_success_url')) for regexp in REDIRECT_WHITELIST.get()]):
action = 'watch-redirect'
else:
action = 'watch-results'
else:
action = 'editor-results'
except QueryServerException, e:
if 'Invalid query handle' in e.message or 'Invalid OperationHandle' in e.message:
query_history.save_state(QueryHistory.STATE.expired)
LOG.warn("Invalid query handle", exc_info=sys.exc_info())
action = 'editor-expired-results'
else:
raise e
else:
# Check perms.
authorized_get_design(request, design_id)
app_name = get_app_name(request)
query_type = SavedQuery.TYPES_MAPPING[app_name]
design = safe_get_design(request, query_type, design_id)
query_history = None
doc = design and design.id and design.doc.get()
context = {
'design': design,
'query': query_history, # Backward
'query_history': query_history,
'autocomplete_base_url': reverse(get_app_name(request) + ':api_autocomplete_databases', kwargs={}),
'autocomplete_base_url_hive': reverse('beeswax:api_autocomplete_databases', kwargs={}),
'can_edit_name': design and design.id and not design.is_auto,
'doc_id': doc and doc.id or -1,
'can_edit': doc and doc.can_write(request.user),
'action': action,
'on_success_url': request.GET.get('on_success_url'),
'has_metastore': 'metastore' in get_apps_dict(request.user)
}
return render('execute.mako', request, context)
def view_results(request, id, first_row=0):
"""
Returns the view for the results of the QueryHistory with the given id.
The query results MUST be ready.
To display query results, one should always go through the execute_query view.
If the result set has has_result_set=False, display an empty result.
If ``first_row`` is 0, restarts (if necessary) the query read. Otherwise, just
  spits out a warning if first_row doesn't match the server's conception.
Multiple readers will produce a confusing interaction here, and that's known.
It understands the ``context`` GET parameter. (See execute_query().)
"""
first_row = long(first_row)
start_over = (first_row == 0)
results = type('Result', (object,), {
'rows': 0,
'columns': [],
'has_more': False,
'start_row': 0,
})
data = []
fetch_error = False
error_message = ''
log = ''
columns = []
app_name = get_app_name(request)
query_history = authorized_get_query_history(request, id, must_exist=True)
query_server = query_history.get_query_server_config()
db = dbms.get(request.user, query_server)
handle, state = _get_query_handle_and_state(query_history)
context_param = request.GET.get('context', '')
query_context = parse_query_context(context_param)
  # Note whether the query has expired; expired results are no longer accessible
expired = state == models.QueryHistory.STATE.expired
# Retrieve query results or use empty result if no result set
try:
if query_server['server_name'] == 'impala' and not handle.has_result_set:
downloadable = False
else:
results = db.fetch(handle, start_over, 100)
# Materialize and HTML escape results
data = escape_rows(results.rows())
# We display the "Download" button only when we know that there are results:
downloadable = first_row > 0 or data
log = db.get_log(handle)
columns = results.data_table.cols()
except Exception, ex:
LOG.exception('error fetching results')
fetch_error = True
error_message, log = expand_exception(ex, db, handle)
# Handle errors
error = fetch_error or results is None or expired
context = {
'error': error,
'message': error_message,
'query': query_history,
'results': data,
'columns': columns,
'expected_first_row': first_row,
'log': log,
'hadoop_jobs': app_name != 'impala' and parse_out_jobs(log),
'query_context': query_context,
'can_save': False,
'context_param': context_param,
'expired': expired,
'app_name': app_name,
'next_json_set': None,
'is_finished': query_history.is_finished()
}
if not error:
download_urls = {}
if downloadable:
for format in common.DL_FORMATS:
download_urls[format] = reverse(app_name + ':download', kwargs=dict(id=str(id), format=format))
results.start_row = first_row
context.update({
'id': id,
'results': data,
'has_more': results.has_more,
'next_row': results.start_row + len(data),
'start_row': results.start_row,
'expected_first_row': first_row,
'columns': columns,
'download_urls': download_urls,
'can_save': query_history.owner == request.user,
'next_json_set':
reverse(get_app_name(request) + ':view_results', kwargs={
'id': str(id),
'first_row': results.start_row + len(data)
}
)
+ ('?context=' + context_param or '') + '&format=json'
})
context['columns'] = massage_columns_for_json(columns)
if 'save_form' in context:
del context['save_form']
if 'query' in context:
del context['query']
return JsonResponse(context)
def configuration(request):
app_name = get_app_name(request)
query_server = get_query_server_config(app_name)
session = Session.objects.get_session(request.user, query_server['server_name'])
if session:
properties = json.loads(session.properties)
# Redact passwords
for key, value in properties.items():
if 'password' in key.lower():
properties[key] = '*' * len(value)
else:
properties = {}
return render("configuration.mako", request, {'configuration': properties})
"""
Other views
"""
def install_examples(request):
response = {'status': -1, 'message': ''}
if request.method == 'POST':
try:
app_name = get_app_name(request)
db_name = request.POST.get('db_name', 'default')
beeswax.management.commands.beeswax_install_examples.Command().handle(app_name=app_name, db_name=db_name, user=request.user)
response['status'] = 0
except Exception, err:
LOG.exception(err)
response['message'] = str(err)
else:
response['message'] = _('A POST request is required.')
return JsonResponse(response)
@login_notrequired
def query_done_cb(request, server_id):
"""
A callback for query completion notification. When the query is done,
BeeswaxServer notifies us by sending a GET request to this view.
"""
message_template = '<html><head></head>%(message)s<body></body></html>'
message = {'message': 'error'}
try:
query_history = QueryHistory.objects.get(server_id=server_id + '\n')
# Update the query status
query_history.set_to_available()
# Find out details about the query
if not query_history.notify:
message['message'] = 'email_notify is false'
return HttpResponse(message_template % message)
design = query_history.design
user = query_history.owner
subject = _("Beeswax query completed.")
if design:
subject += ": %s" % (design.name,)
link = "%s%s" % \
(get_desktop_uri_prefix(),
reverse(get_app_name(request) + ':watch_query_history', kwargs={'query_history_id': query_history.id}))
body = _("%(subject)s. See the results here: %(link)s\n\nQuery:\n%(query)s") % {
'subject': subject, 'link': link, 'query': query_history.query
}
user.email_user(subject, body)
message['message'] = 'sent'
except Exception, ex:
msg = "Failed to send query completion notification via e-mail: %s" % (ex)
LOG.error(msg)
message['message'] = msg
return HttpResponse(message_template % message)
"""
Utils
"""
def massage_columns_for_json(cols):
massaged_cols = []
for column in cols:
massaged_cols.append({
'name': column.name,
'type': column.type,
'comment': column.comment
})
return massaged_cols
def authorized_get_design(request, design_id, owner_only=False, must_exist=False):
if design_id is None and not must_exist:
return None
try:
design = SavedQuery.objects.get(id=design_id)
except SavedQuery.DoesNotExist:
if must_exist:
raise PopupException(_('Design %(id)s does not exist.') % {'id': design_id})
else:
return None
if owner_only:
design.doc.get().can_write_or_exception(request.user)
else:
design.doc.get().can_read_or_exception(request.user)
return design
def authorized_get_query_history(request, query_history_id, owner_only=False, must_exist=False):
if query_history_id is None and not must_exist:
return None
try:
query_history = QueryHistory.get(id=query_history_id)
except QueryHistory.DoesNotExist:
if must_exist:
raise PopupException(_('QueryHistory %(id)s does not exist.') % {'id': query_history_id})
else:
return None
# Some queries don't have a design so are not linked to Document Model permission
if query_history.design is None or not query_history.design.doc.exists():
if not request.user.is_superuser and request.user != query_history.owner:
raise PopupException(_('Permission denied to read QueryHistory %(id)s') % {'id': query_history_id})
else:
query_history.design.doc.get().can_read_or_exception(request.user)
return query_history
def safe_get_design(request, design_type, design_id=None):
"""
Return a new design, if design_id is None,
Return the design with the given id and type. If the design is not found,
display a notification and return a new design.
"""
design = None
if design_id is not None:
design = authorized_get_design(request, design_id)
if design is None:
design = SavedQuery(owner=request.user, type=design_type)
return design
def make_parameterization_form(query_str):
"""
Creates a django form on the fly with arguments from the
query.
"""
variables = find_variables(query_str)
if len(variables) > 0:
class Form(forms.Form):
for name in sorted(variables):
locals()[name] = forms.CharField(required=True)
return Form
else:
return None
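# Illustrative example (hypothetical query): for "SELECT * FROM logs WHERE day='$day'",
# find_variables() would report the variable 'day', and the returned Form
# subclass would carry a single required CharField named 'day' for the user to fill in.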
def execute_directly(request, query, query_server=None,
design=None, on_success_url=None, on_success_params=None,
**kwargs):
"""
execute_directly(request, query_msg, tablename, design) -> HTTP response for execution
This method wraps around dbms.execute_query() to take care of the HTTP response
after the execution.
query
The HQL model Query object.
query_server
To which Query Server to submit the query.
Dictionary with keys: ['server_name', 'server_host', 'server_port'].
design
The design associated with the query.
on_success_url
Where to go after the query is done. The URL handler may expect an option "context" GET
param. (See ``watch_query``.) For advanced usage, on_success_url can be a function, in
which case the on complete URL is the return of:
on_success_url(history_obj) -> URL string
Defaults to the view results page.
on_success_params
Optional params to pass to the on_success_url (in additional to "context").
Note that this may throw a Beeswax exception.
"""
if design is not None:
authorized_get_design(request, design.id)
db = dbms.get(request.user, query_server)
database = query.query.get('database', 'default')
db.use(database)
query_history = db.execute_query(query, design)
watch_url = reverse(get_app_name(request) + ':watch_query_history', kwargs={'query_history_id': query_history.id})
# Prepare the GET params for the watch_url
get_dict = QueryDict(None, mutable=True)
# (1) on_success_url
if on_success_url:
if callable(on_success_url):
on_success_url = on_success_url(query_history)
get_dict['on_success_url'] = on_success_url
# (2) misc
if on_success_params:
get_dict.update(on_success_params)
return format_preserving_redirect(request, watch_url, get_dict)
def _list_designs(user, querydict, page_size, prefix="", is_trashed=False):
"""
_list_designs(user, querydict, page_size, prefix, is_trashed) -> (page, filter_param)
A helper to gather the designs page. It understands all the GET params in
``list_designs``, by reading keys from the ``querydict`` with the given ``prefix``.
"""
DEFAULT_SORT = ('-', 'date') # Descending date
SORT_ATTR_TRANSLATION = dict(
date='last_modified',
name='name',
desc='description',
type='extra',
)
# Trash and security
if is_trashed:
db_queryset = Document.objects.trashed_docs(SavedQuery, user)
else:
db_queryset = Document.objects.available_docs(SavedQuery, user)
# Filter by user
filter_username = querydict.get(prefix + 'user')
if filter_username:
try:
db_queryset = db_queryset.filter(owner=User.objects.get(username=filter_username))
except User.DoesNotExist:
# Don't care if a bad filter term is provided
pass
# Design type
d_type = querydict.get(prefix + 'type')
if d_type and d_type in SavedQuery.TYPES_MAPPING.keys():
db_queryset = db_queryset.filter(extra=str(SavedQuery.TYPES_MAPPING[d_type]))
# Text search
frag = querydict.get(prefix + 'text')
if frag:
db_queryset = db_queryset.filter(Q(name__icontains=frag) | Q(description__icontains=frag))
# Ordering
sort_key = querydict.get(prefix + 'sort')
if sort_key:
if sort_key[0] == '-':
sort_dir, sort_attr = '-', sort_key[1:]
else:
sort_dir, sort_attr = '', sort_key
    if sort_attr not in SORT_ATTR_TRANSLATION:
LOG.warn('Bad parameter to list_designs: sort=%s' % (sort_key,))
sort_dir, sort_attr = DEFAULT_SORT
else:
sort_dir, sort_attr = DEFAULT_SORT
db_queryset = db_queryset.order_by(sort_dir + SORT_ATTR_TRANSLATION[sort_attr])
  designs = [job.content_object for job in db_queryset.all() if job.content_object and not job.content_object.is_auto]
pagenum = int(querydict.get(prefix + 'page', 1))
paginator = Paginator(designs, page_size)
page = paginator.page(pagenum)
# We need to pass the parameters back to the template to generate links
keys_to_copy = [ prefix + key for key in ('user', 'type', 'sort', 'text') ]
filter_params = copy_query_dict(querydict, keys_to_copy)
return page, filter_params
def _get_query_handle_and_state(query_history):
"""
Front-end wrapper to handle exceptions. Expects the query to be submitted.
"""
handle = query_history.get_handle()
if handle is None:
raise PopupException(_("Failed to retrieve query state from the Query Server."))
state = dbms.get(query_history.owner, query_history.get_query_server_config()).get_state(handle)
if state is None:
raise PopupException(_("Failed to contact Server to check query status."))
return (handle, state)
def parse_query_context(context):
"""
parse_query_context(context) -> ('table', <table_name>) -or- ('design', <design_obj>)
"""
if not context:
return None
pair = context.split(':', 1)
if len(pair) != 2 or pair[0] not in ('table', 'design'):
LOG.error("Invalid query context data: %s" % (context,))
return None
if pair[0] == 'design': # Translate design id to design obj
pair[1] = models.SavedQuery.get(int(pair[1]))
return pair
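# e.g. parse_query_context('table:sample_07') -> ['table', 'sample_07'], while
# 'design:42' resolves the id into its SavedQuery object.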
def parse_out_jobs(log, engine='mr', with_state=False):
"""
Ideally, Hive would tell us what jobs it has run directly from the Thrift interface.
with_state: If True, will return a list of dict items with 'job_id', 'started', 'finished'
"""
ret = []
if engine.lower() == 'mr':
start_pattern = HADOOP_JOBS_RE
elif engine.lower() == 'spark':
start_pattern = SPARK_APPLICATION_RE
elif engine.lower() == 'tez':
start_pattern = TEZ_APPLICATION_RE
else:
raise ValueError(_('Cannot parse job IDs for execution engine %(engine)s') % {'engine': engine})
for match in start_pattern.finditer(log):
job_id = match.group(1)
if with_state:
if job_id not in list(job['job_id'] for job in ret):
ret.append({'job_id': job_id, 'started': True, 'finished': False})
end_pattern = 'Ended Job = %s' % job_id
if end_pattern in log:
job = next((job for job in ret if job['job_id'] == job_id), None)
if job is not None:
job['finished'] = True
else:
ret.append({'job_id': job_id, 'started': True, 'finished': True})
else:
if job_id not in ret:
ret.append(job_id)
return ret
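# e.g. a log line "Starting Job = job_1455618556000_0001, Tracking URL = ..."
# yields 'job_1455618556000_0001'; with with_state=True, a matching
# "Ended Job = job_1455618556000_0001" line marks that entry as finished.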
def _copy_prefix(prefix, base_dict):
"""Copy keys starting with ``prefix``"""
querydict = QueryDict(None, mutable=True)
for key, val in base_dict.iteritems():
if key.startswith(prefix):
querydict[key] = val
return querydict
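# e.g. _copy_prefix('q-', {'q-user': 'bob', 'h-user': 'alice'}) keeps only
# {'q-user': 'bob'}.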
def _list_query_history(user, querydict, page_size, prefix=""):
"""
_list_query_history(user, querydict, page_size, prefix) -> (page, filter_param)
A helper to gather the history page. It understands all the GET params in
``list_query_history``, by reading keys from the ``querydict`` with the
given ``prefix``.
"""
DEFAULT_SORT = ('-', 'date') # Descending date
SORT_ATTR_TRANSLATION = dict(
date='submission_date',
state='last_state',
name='design__name',
type='design__type',
)
db_queryset = models.QueryHistory.objects.select_related()
# Filtering
#
# Queries without designs are the ones we submitted on behalf of the user,
# (e.g. view table data). Exclude those when returning query history.
if querydict.get(prefix + 'auto_query', 'on') != 'on':
db_queryset = db_queryset.exclude(design__isnull=False, design__is_auto=True)
user_filter = querydict.get(prefix + 'user', user.username)
if user_filter != ':all':
db_queryset = db_queryset.filter(owner__username=user_filter)
# Design id
design_id = querydict.get(prefix + 'design_id')
if design_id:
if design_id.isdigit():
db_queryset = db_queryset.filter(design__id=int(design_id))
else:
raise PopupException(_('list_query_history requires design_id parameter to be an integer: %s') % design_id)
# Search
search_filter = querydict.get(prefix + 'search')
if search_filter:
db_queryset = db_queryset.filter(Q(design__name__icontains=search_filter) | Q(query__icontains=search_filter) | Q(owner__username__icontains=search_filter))
# Design type
d_type = querydict.get(prefix + 'type')
if d_type:
if d_type not in SavedQuery.TYPES_MAPPING.keys():
LOG.warn('Bad parameter to list_query_history: type=%s' % (d_type,))
else:
db_queryset = db_queryset.filter(design__type=SavedQuery.TYPES_MAPPING[d_type])
# If recent query
recent = querydict.get('recent')
if recent:
db_queryset = db_queryset.filter(is_cleared=False)
# Ordering
sort_key = querydict.get(prefix + 'sort')
if sort_key:
sort_dir, sort_attr = '', sort_key
if sort_key[0] == '-':
sort_dir, sort_attr = '-', sort_key[1:]
    if sort_attr not in SORT_ATTR_TRANSLATION:
LOG.warn('Bad parameter to list_query_history: sort=%s' % (sort_key,))
sort_dir, sort_attr = DEFAULT_SORT
else:
sort_dir, sort_attr = DEFAULT_SORT
db_queryset = db_queryset.order_by(sort_dir + SORT_ATTR_TRANSLATION[sort_attr], '-id')
# Get the total return count before slicing
total_count = db_queryset.count()
# Slicing (must be the last filter applied)
pagenum = int(querydict.get(prefix + 'page', 1))
if pagenum < 1:
pagenum = 1
db_queryset = db_queryset[ page_size * (pagenum - 1) : page_size * pagenum ]
paginator = Paginator(db_queryset, page_size, total=total_count)
page = paginator.page(pagenum)
# We do slicing ourselves, rather than letting the Paginator handle it, in order to
# update the last_state on the running queries
for history in page.object_list:
_update_query_state(history.get_full_object())
# We need to pass the parameters back to the template to generate links
keys_to_copy = [ prefix + key for key in ('user', 'type', 'sort', 'design_id', 'auto_query', 'search') ]
filter_params = copy_query_dict(querydict, keys_to_copy)
return page, filter_params
def _update_query_state(query_history):
"""
Update the last_state for a QueryHistory object. Returns success as True/False.
This only occurs iff the current last_state is submitted or running, since the other
states are stable, more-or-less.
Note that there is a transition from available/failed to expired. That occurs lazily
when the user attempts to view results that have expired.
"""
if query_history.last_state <= models.QueryHistory.STATE.running.index:
try:
state_enum = dbms.get(query_history.owner, query_history.get_query_server_config()).get_state(query_history.get_handle())
if state_enum is None:
# Error was logged at the source
return False
except Exception, e:
LOG.error(e)
state_enum = models.QueryHistory.STATE.failed
query_history.save_state(state_enum)
return True
def get_db_choices(request):
app_name = get_app_name(request)
query_server = get_query_server_config(app_name)
db = dbms.get(request.user, query_server)
dbs = db.get_databases()
return [(db, db) for db in dbs]
WHITESPACE = re.compile("\s+", re.MULTILINE)
def collapse_whitespace(s):
return WHITESPACE.sub(" ", s).strip()
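# e.g. collapse_whitespace("SELECT *\n\t FROM tbl ") -> "SELECT * FROM tbl"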
|
{
"content_hash": "5aaec733a81557755bff7e45ec2b9c4e",
"timestamp": "",
"source": "github",
"line_count": 1046,
"max_line_length": 160,
"avg_line_length": 34.01625239005736,
"alnum_prop": 0.668924425957674,
"repo_name": "jayceyxc/hue",
"id": "82eb8aa1b673c0f980b08585e26d557af72cb68b",
"size": "36373",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "apps/beeswax/src/beeswax/views.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "3096"
},
{
"name": "Batchfile",
"bytes": "41710"
},
{
"name": "C",
"bytes": "2716690"
},
{
"name": "C++",
"bytes": "200268"
},
{
"name": "CSS",
"bytes": "630891"
},
{
"name": "Emacs Lisp",
"bytes": "11704"
},
{
"name": "Genshi",
"bytes": "946"
},
{
"name": "Go",
"bytes": "6671"
},
{
"name": "HTML",
"bytes": "23982883"
},
{
"name": "Java",
"bytes": "575404"
},
{
"name": "JavaScript",
"bytes": "5068327"
},
{
"name": "Lex",
"bytes": "36239"
},
{
"name": "M4",
"bytes": "1377"
},
{
"name": "Makefile",
"bytes": "146292"
},
{
"name": "Mako",
"bytes": "3334641"
},
{
"name": "Myghty",
"bytes": "936"
},
{
"name": "PLSQL",
"bytes": "31565"
},
{
"name": "PLpgSQL",
"bytes": "3646"
},
{
"name": "Perl",
"bytes": "3499"
},
{
"name": "PigLatin",
"bytes": "328"
},
{
"name": "Python",
"bytes": "45608023"
},
{
"name": "Roff",
"bytes": "16669"
},
{
"name": "Shell",
"bytes": "46700"
},
{
"name": "Smarty",
"bytes": "130"
},
{
"name": "Thrift",
"bytes": "278712"
},
{
"name": "Visual Basic",
"bytes": "2884"
},
{
"name": "XSLT",
"bytes": "517693"
},
{
"name": "Yacc",
"bytes": "381310"
}
],
"symlink_target": ""
}
|
import pytest
from selenium import webdriver
import os
# Fixtures are a way to setup data dependencies in automated tests.
@pytest.fixture(scope="function")
def driver(request):
desired_caps = {}
wd = None
if os.getenv('RUN_TARGET') == "SAUCE":
# sauce labs.
desired_caps['browserName'] = ""
desired_caps['appiumVersion'] = "1.4.16"
#desired_caps['deviceName'] = "iPhone 5"
desired_caps['deviceName'] = "iPhone Simulator"
desired_caps['deviceOrientation'] = "portrait"
desired_caps['platformVersion'] = "9.2"
desired_caps['platformName'] = "iOS"
desired_caps['app'] = "sauce-storage:TapIt.zip"
desired_caps['name'] = os.environ['TEST_NAME']
# saucelabs connection string.
sauce_user = os.getenv('SAUCE_USER')
sauce_key = os.getenv('SAUCE_KEY')
wd = webdriver.Remote("http://{sauce_user}:{sauce_key}@ondemand.saucelabs.com:80/wd/hub".format(
sauce_user=sauce_user,
sauce_key=sauce_key),
desired_caps)
elif os.getenv('RUN_TARGET') == "AMAZON_DEVICE_FARM" or os.getenv('SCREENSHOT_PATH') is not None :
# Using a hack that SCREENSHOT_PATH is provided by Amazon Device Farm.
# We have to do this because when running with the ADF Jenkins Plugin, we do not have the
# opportunity to set the enviornment variables.
wd = webdriver.Remote('http://0.0.0.0:4723/wd/hub', desired_caps)
else:
# Localhost appium
desired_caps['appium-version'] = '1.0'
desired_caps['platformName'] = 'iOS'
desired_caps['platformVersion'] = '9.2'
desired_caps['deviceName'] = 'iPhone 6'
desired_caps['app'] = os.path.abspath('staging/TapIt.app')
# local host
wd = webdriver.Remote('http://0.0.0.0:4723/wd/hub', desired_caps)
wd.implicitly_wait(300)
# A finalizer is added if there needs to be a teardown to undo the effects of the automated test.
def fin():
wd.quit()
request.addfinalizer(fin)
return wd # Returns a fixture that contains the test data we need for the test.
# Test classes start with the word "Test". It should be named Test + Feature you are testing.
class TestExample:
# Test methods start with the word "test", name this using the pattern,
# "test_ + (what you are testing) + "_" + (what is the expected result)
# The parameters for a test are fixtures needed. Use the fixture's return to feed data into
# a test.
def test_example_works(self, driver):
driver.find_element_by_xpath("//UIAApplication[1]/UIAWindow[1]/UIAButton[1]").click()
score = driver.find_element_by_xpath("//UIAApplication[1]/UIAWindow[1]/UIAStaticText[2]").text
assert "1" in score
|
{
"content_hash": "a0f486dfacc67878c6561c5993c7261a",
"timestamp": "",
"source": "github",
"line_count": 69,
"max_line_length": 104,
"avg_line_length": 40.44927536231884,
"alnum_prop": 0.6402723038337513,
"repo_name": "dlai0001/appium-spike-running-tests-oncloud",
"id": "c72330435be2e91016072b0bd45ae865309a30c8",
"size": "2791",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_examply.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "5157"
}
],
"symlink_target": ""
}
|
from distutils.core import setup, Extension
import os, sys
def local_path(path):
local_dir = os.path.dirname(__file__)
return os.path.normpath(os.path.join(local_dir, path))
def parse_version_from_c():
cfile = open(local_path('../src/dablooms.c'))
result = ''
for line in cfile:
parts = line.split()
if len(parts) == 3 and parts[:2] == ['#define', 'DABLOOMS_VERSION']:
result = parts[2].strip('"')
break
cfile.close()
return result
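# Illustrative: a line '#define DABLOOMS_VERSION "0.9.1"' in dablooms.c would
# yield '0.9.1' (the version string shown here is hypothetical).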
def path_from_env(name, default):
return os.environ.get(name, local_path(default))
module1 = Extension('pydablooms',
include_dirs = [local_path('../src')],
sources = [local_path('pydablooms.c'),
local_path('../src/dablooms.c'),
local_path('../src/murmur.c'), ],
)
setup (name = 'pydablooms',
version = parse_version_from_c(),
       description = 'This is a Python extension of the scaling, counting bloom filter, dablooms.',
author = 'Justin P. Hines',
author_email = 'justinhines@bit.ly',
url = 'http://github.com/bitly/dablooms.git',
ext_modules = [module1])
|
{
"content_hash": "a9eef49e16e032199a25d73ca5c5bbd6",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 99,
"avg_line_length": 34.68571428571428,
"alnum_prop": 0.5675453047775947,
"repo_name": "bitly/dablooms",
"id": "add7ba56e57f3041b791377813fb1c7bf347e7d6",
"size": "1214",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "pydablooms/setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "38954"
},
{
"name": "Go",
"bytes": "2500"
},
{
"name": "Makefile",
"bytes": "5065"
},
{
"name": "Python",
"bytes": "4439"
}
],
"symlink_target": ""
}
|
'''
All zstack license operations
@author: Quarkonics
'''
import os
import apibinding.api_actions as api_actions
import apibinding.inventory as inventory
import account_operations
import zstackwoodpecker.test_util as test_util
def get_license_info(session_uuid = None):
action = api_actions.GetLicenseInfoAction()
test_util.action_logger('Get license info')
result = account_operations.execute_action_with_session(action, \
session_uuid)
return result
def reload_license(session_uuid = None):
action = api_actions.ReloadLicenseAction()
test_util.action_logger('Reload license')
result = account_operations.execute_action_with_session(action, \
session_uuid)
return result
def update_license(node_uuid, file_license, session_uuid = None):
action = api_actions.UpdateLicenseAction()
action.managementNodeUuid = node_uuid
action.license = file_license
test_util.action_logger('update license from UI')
result = account_operations.execute_action_with_session(action, \
session_uuid)
return result
def get_license_addons_info(session_uuid = None):
action = api_actions.GetLicenseAddOnsAction()
test_util.action_logger('Get license addons info')
result = account_operations.execute_action_with_session(action, \
session_uuid)
return result
def delete_license(node_uuid, uuid, session_uuid = None):
action = api_actions.DeleteLicenseAction()
action.managementNodeUuid = node_uuid
action.uuid = uuid
test_util.action_logger('delete license [uuid:] %s' % uuid)
result = account_operations.execute_action_with_session(action, \
session_uuid)
return result
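# Illustrative usage (a default session is used when session_uuid is None):
#   info = get_license_info()
#   reload_license()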
|
{
"content_hash": "bec9706572cdd7a04bea640293fb990a",
"timestamp": "",
"source": "github",
"line_count": 58,
"max_line_length": 69,
"avg_line_length": 30.603448275862068,
"alnum_prop": 0.6929577464788732,
"repo_name": "zstackio/zstack-woodpecker",
"id": "24d5796628cc2406f74b5801e2afd767b4d1f2bc",
"size": "1775",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "zstackwoodpecker/zstackwoodpecker/operations/license_operations.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2356"
},
{
"name": "Go",
"bytes": "49822"
},
{
"name": "Makefile",
"bytes": "687"
},
{
"name": "Puppet",
"bytes": "875"
},
{
"name": "Python",
"bytes": "13070596"
},
{
"name": "Shell",
"bytes": "177861"
}
],
"symlink_target": ""
}
|
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Hitch'
copyright = u'2014, Clark Fischer'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.0.1'
# The full version, including alpha/beta/rc tags.
release = '0.0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'Hitchdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'Hitch.tex', u'Hitch Documentation',
u'Clark Fischer', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'hitch', u'Hitch Documentation',
[u'Clark Fischer'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'Hitch', u'Hitch Documentation',
u'Clark Fischer', 'Hitch', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
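# Register a PHP lexer so blocks marked 'php' highlight inline snippets without requiring <?php tags.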
from sphinx.highlighting import lexers
from pygments.lexers.web import PhpLexer
lexers['php'] = PhpLexer(startinline=True)
|
{
"content_hash": "787cb198b5569e5388477db3e32454a3",
"timestamp": "",
"source": "github",
"line_count": 233,
"max_line_length": 80,
"avg_line_length": 31.836909871244636,
"alnum_prop": 0.7020760312752764,
"repo_name": "clarkf/hitch",
"id": "15bf37b642214c57e8e6bb5dd7851067a018fdd4",
"size": "7834",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "docs/conf.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "PHP",
"bytes": "28956"
},
{
"name": "Python",
"bytes": "7834"
}
],
"symlink_target": ""
}
|
from syzoj import oj, db
import zipfile, os
import hashlib
class File(db.Model):
id = db.Column(db.Integer, primary_key=True)
filename = db.Column(db.String(120), index=True)
md5 = db.Column(db.String(50), index=True)
def __init__(self, file):
self.file = file
self.md5 = self.calc_md5(file)
@staticmethod
def calc_md5(file):
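        # Rewind and hash the file in 8 KB chunks so large uploads never
        # need to be read into memory all at once.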
file.seek(0)
m = hashlib.md5()
while True:
data = file.read(8192)
if not data:
break
m.update(data)
md5 = m.hexdigest()
return md5
def save_file(self):
self.file.seek(0)
self.file.save(os.path.join(oj.config['UPLOAD_FOLDER'], self.md5))
def get_file_path(self):
return os.path.join(oj.config["UPLOAD_FOLDER"], self.md5)
def save(self):
db.session.add(self)
db.session.commit()
class FileParser(object):
@staticmethod
def parse_as_testdata(file):
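        # data_rule.txt layout (three lines): the test case numbers, the input
        # filename pattern and the output filename pattern; '#' in a pattern
        # is replaced by each case number.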
filename = file.get_file_path()
if not zipfile.is_zipfile(filename):
return (False, "This file isn\'t zipfile")
with zipfile.ZipFile(filename) as zip_file:
file_list = zip_file.namelist()
if "data_rule.txt" not in file_list:
return (False, "Can\'t find data_rule.txt in testdata pack.")
data_rule = zip_file.read("data_rule.txt")
lines = data_rule.split('\n')
for i in range(0, len(lines)):
lines[i] = lines[i].replace('\r', '').replace('\n', '')
if len(lines) < 3:
return (False, "data_rule.txt should have 3 lines.")
data_no = lines[0].split()
input_name = lines[1]
output_name = lines[2]
ret = []
for i in data_no:
i = int(i)
input_file = input_name.replace('#', str(i))
output_file = output_name.replace('#', str(i))
if input_file not in file_list:
return (False, "Can\'t find %s file." % input_file)
if output_file not in file_list:
return (False, "Can\'t find %s file." % output_file)
ret.append((input_file, output_file))
return (True, ret)
|
{
"content_hash": "1cb06eff44e49cb963c564fe8d2f07b8",
"timestamp": "",
"source": "github",
"line_count": 78,
"max_line_length": 77,
"avg_line_length": 29.371794871794872,
"alnum_prop": 0.5259711916193802,
"repo_name": "cdcq/jzyzj",
"id": "da7bac0b1c7977c01ebed39ccb4542f477a3f432",
"size": "2291",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "syzoj/models/file.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "112"
},
{
"name": "HTML",
"bytes": "64378"
},
{
"name": "Python",
"bytes": "66587"
}
],
"symlink_target": ""
}
|
import numpy as np
import pytest
from pandas.compat.numpy import (
np_percentile_argname,
np_version_under1p21,
)
import pandas as pd
from pandas import (
DataFrame,
Index,
Series,
Timestamp,
)
import pandas._testing as tm
@pytest.fixture(
params=[["linear", "single"], ["nearest", "table"]], ids=lambda x: "-".join(x)
)
def interp_method(request):
"""(interpolation, method) arguments for quantile"""
return request.param
class TestDataFrameQuantile:
@pytest.mark.parametrize(
"non_num_col",
[
pd.date_range("2014-01-01", periods=3, freq="m"),
["a", "b", "c"],
[DataFrame, Series, Timestamp],
],
)
def test_numeric_only_default_false_warning(
self, non_num_col, interp_method, request, using_array_manager
):
# GH #7308
interpolation, method = interp_method
df = DataFrame({"A": [1, 2, 3], "B": [2, 3, 4]})
df["C"] = non_num_col
expected = Series(
[2.0, 3.0],
index=["A", "B"],
name=0.5,
)
if interpolation == "nearest":
expected = expected.astype(np.int64)
if method == "table" and using_array_manager:
request.node.add_marker(
pytest.mark.xfail(reason="Axis name incorrectly set.")
)
with tm.assert_produces_warning(FutureWarning, match="numeric_only"):
result = df.quantile(0.5, interpolation=interpolation, method=method)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"df,expected",
[
[
DataFrame(
{
0: Series(pd.arrays.SparseArray([1, 2])),
1: Series(pd.arrays.SparseArray([3, 4])),
}
),
Series([1.5, 3.5], name=0.5),
],
[
DataFrame(Series([0.0, None, 1.0, 2.0], dtype="Sparse[float]")),
Series([1.0], name=0.5),
],
],
)
def test_quantile_sparse(self, df, expected):
# GH#17198
# GH#24600
result = df.quantile()
tm.assert_series_equal(result, expected)
def test_quantile(
self, datetime_frame, interp_method, using_array_manager, request
):
interpolation, method = interp_method
df = datetime_frame
result = df.quantile(
0.1, axis=0, numeric_only=True, interpolation=interpolation, method=method
)
expected = Series(
[np.percentile(df[col], 10) for col in df.columns],
index=df.columns,
name=0.1,
)
if interpolation == "linear":
# np.percentile values only comparable to linear interpolation
tm.assert_series_equal(result, expected)
else:
tm.assert_index_equal(result.index, expected.index)
request.node.add_marker(
pytest.mark.xfail(
using_array_manager, reason="Name set incorrectly for arraymanager"
)
)
assert result.name == expected.name
result = df.quantile(
0.9, axis=1, numeric_only=True, interpolation=interpolation, method=method
)
expected = Series(
[np.percentile(df.loc[date], 90) for date in df.index],
index=df.index,
name=0.9,
)
if interpolation == "linear":
# np.percentile values only comparable to linear interpolation
tm.assert_series_equal(result, expected)
else:
tm.assert_index_equal(result.index, expected.index)
request.node.add_marker(
pytest.mark.xfail(
using_array_manager, reason="Name set incorrectly for arraymanager"
)
)
assert result.name == expected.name
def test_empty(self, interp_method):
interpolation, method = interp_method
q = DataFrame({"x": [], "y": []}).quantile(
0.1, axis=0, numeric_only=True, interpolation=interpolation, method=method
)
assert np.isnan(q["x"]) and np.isnan(q["y"])
def test_non_numeric_exclusion(self, interp_method, request, using_array_manager):
interpolation, method = interp_method
df = DataFrame({"col1": ["A", "A", "B", "B"], "col2": [1, 2, 3, 4]})
rs = df.quantile(
0.5, numeric_only=True, interpolation=interpolation, method=method
)
with tm.assert_produces_warning(FutureWarning, match="Select only valid"):
xp = df.median().rename(0.5)
if interpolation == "nearest":
xp = (xp + 0.5).astype(np.int64)
if method == "table" and using_array_manager:
request.node.add_marker(
pytest.mark.xfail(reason="Axis name incorrectly set.")
)
tm.assert_series_equal(rs, xp)
def test_axis(self, interp_method, request, using_array_manager):
# axis
interpolation, method = interp_method
df = DataFrame({"A": [1, 2, 3], "B": [2, 3, 4]}, index=[1, 2, 3])
result = df.quantile(0.5, axis=1, interpolation=interpolation, method=method)
expected = Series([1.5, 2.5, 3.5], index=[1, 2, 3], name=0.5)
if interpolation == "nearest":
expected = expected.astype(np.int64)
if method == "table" and using_array_manager:
request.node.add_marker(
pytest.mark.xfail(reason="Axis name incorrectly set.")
)
tm.assert_series_equal(result, expected)
result = df.quantile(
[0.5, 0.75], axis=1, interpolation=interpolation, method=method
)
expected = DataFrame(
{1: [1.5, 1.75], 2: [2.5, 2.75], 3: [3.5, 3.75]}, index=[0.5, 0.75]
)
if interpolation == "nearest":
expected.iloc[0, :] -= 0.5
expected.iloc[1, :] += 0.25
expected = expected.astype(np.int64)
tm.assert_frame_equal(result, expected, check_index_type=True)
def test_axis_numeric_only_true(self, interp_method, request, using_array_manager):
# We may want to break API in the future to change this
# so that we exclude non-numeric along the same axis
# See GH #7312
interpolation, method = interp_method
df = DataFrame([[1, 2, 3], ["a", "b", 4]])
result = df.quantile(
0.5, axis=1, numeric_only=True, interpolation=interpolation, method=method
)
expected = Series([3.0, 4.0], index=[0, 1], name=0.5)
if interpolation == "nearest":
expected = expected.astype(np.int64)
if method == "table" and using_array_manager:
request.node.add_marker(
pytest.mark.xfail(reason="Axis name incorrectly set.")
)
tm.assert_series_equal(result, expected)
def test_quantile_date_range(self, interp_method, request, using_array_manager):
# GH 2460
interpolation, method = interp_method
dti = pd.date_range("2016-01-01", periods=3, tz="US/Pacific")
ser = Series(dti)
df = DataFrame(ser)
result = df.quantile(
numeric_only=False, interpolation=interpolation, method=method
)
expected = Series(
["2016-01-02 00:00:00"], name=0.5, dtype="datetime64[ns, US/Pacific]"
)
if method == "table" and using_array_manager:
request.node.add_marker(
pytest.mark.xfail(reason="Axis name incorrectly set.")
)
tm.assert_series_equal(result, expected)
def test_quantile_axis_mixed(self, interp_method, request, using_array_manager):
# mixed on axis=1
interpolation, method = interp_method
df = DataFrame(
{
"A": [1, 2, 3],
"B": [2.0, 3.0, 4.0],
"C": pd.date_range("20130101", periods=3),
"D": ["foo", "bar", "baz"],
}
)
result = df.quantile(
0.5, axis=1, numeric_only=True, interpolation=interpolation, method=method
)
expected = Series([1.5, 2.5, 3.5], name=0.5)
if interpolation == "nearest":
expected -= 0.5
if method == "table" and using_array_manager:
request.node.add_marker(
pytest.mark.xfail(reason="Axis name incorrectly set.")
)
tm.assert_series_equal(result, expected)
# must raise
msg = "'<' not supported between instances of 'Timestamp' and 'float'"
with pytest.raises(TypeError, match=msg):
df.quantile(0.5, axis=1, numeric_only=False)
def test_quantile_axis_parameter(self, interp_method, request, using_array_manager):
# GH 9543/9544
interpolation, method = interp_method
if method == "table" and using_array_manager:
request.node.add_marker(
pytest.mark.xfail(reason="Axis name incorrectly set.")
)
df = DataFrame({"A": [1, 2, 3], "B": [2, 3, 4]}, index=[1, 2, 3])
result = df.quantile(0.5, axis=0, interpolation=interpolation, method=method)
expected = Series([2.0, 3.0], index=["A", "B"], name=0.5)
if interpolation == "nearest":
expected = expected.astype(np.int64)
tm.assert_series_equal(result, expected)
expected = df.quantile(
0.5, axis="index", interpolation=interpolation, method=method
)
if interpolation == "nearest":
expected = expected.astype(np.int64)
tm.assert_series_equal(result, expected)
result = df.quantile(0.5, axis=1, interpolation=interpolation, method=method)
expected = Series([1.5, 2.5, 3.5], index=[1, 2, 3], name=0.5)
if interpolation == "nearest":
expected = expected.astype(np.int64)
tm.assert_series_equal(result, expected)
result = df.quantile(
0.5, axis="columns", interpolation=interpolation, method=method
)
tm.assert_series_equal(result, expected)
msg = "No axis named -1 for object type DataFrame"
with pytest.raises(ValueError, match=msg):
df.quantile(0.1, axis=-1, interpolation=interpolation, method=method)
msg = "No axis named column for object type DataFrame"
with pytest.raises(ValueError, match=msg):
df.quantile(0.1, axis="column")
def test_quantile_interpolation(self):
# see gh-10174
# interpolation method other than default linear
df = DataFrame({"A": [1, 2, 3], "B": [2, 3, 4]}, index=[1, 2, 3])
result = df.quantile(0.5, axis=1, interpolation="nearest")
expected = Series([1, 2, 3], index=[1, 2, 3], name=0.5)
tm.assert_series_equal(result, expected)
# cross-check interpolation=nearest results in original dtype
exp = np.percentile(
np.array([[1, 2, 3], [2, 3, 4]]),
0.5,
axis=0,
**{np_percentile_argname: "nearest"},
)
expected = Series(exp, index=[1, 2, 3], name=0.5, dtype="int64")
tm.assert_series_equal(result, expected)
# float
df = DataFrame({"A": [1.0, 2.0, 3.0], "B": [2.0, 3.0, 4.0]}, index=[1, 2, 3])
result = df.quantile(0.5, axis=1, interpolation="nearest")
expected = Series([1.0, 2.0, 3.0], index=[1, 2, 3], name=0.5)
tm.assert_series_equal(result, expected)
exp = np.percentile(
np.array([[1.0, 2.0, 3.0], [2.0, 3.0, 4.0]]),
0.5,
axis=0,
**{np_percentile_argname: "nearest"},
)
expected = Series(exp, index=[1, 2, 3], name=0.5, dtype="float64")
tm.assert_series_equal(result, expected)
# axis
result = df.quantile([0.5, 0.75], axis=1, interpolation="lower")
expected = DataFrame(
{1: [1.0, 1.0], 2: [2.0, 2.0], 3: [3.0, 3.0]}, index=[0.5, 0.75]
)
tm.assert_frame_equal(result, expected)
# test degenerate case
df = DataFrame({"x": [], "y": []})
q = df.quantile(0.1, axis=0, interpolation="higher")
assert np.isnan(q["x"]) and np.isnan(q["y"])
# multi
df = DataFrame([[1, 1, 1], [2, 2, 2], [3, 3, 3]], columns=["a", "b", "c"])
result = df.quantile([0.25, 0.5], interpolation="midpoint")
# https://github.com/numpy/numpy/issues/7163
expected = DataFrame(
[[1.5, 1.5, 1.5], [2.0, 2.0, 2.0]],
index=[0.25, 0.5],
columns=["a", "b", "c"],
)
tm.assert_frame_equal(result, expected)
def test_quantile_interpolation_datetime(self, datetime_frame):
# see gh-10174
# interpolation = linear (default case)
df = datetime_frame
q = df.quantile(0.1, axis=0, numeric_only=True, interpolation="linear")
assert q["A"] == np.percentile(df["A"], 10)
def test_quantile_interpolation_int(self, int_frame):
# see gh-10174
df = int_frame
# interpolation = linear (default case)
q = df.quantile(0.1)
assert q["A"] == np.percentile(df["A"], 10)
# test with and without interpolation keyword
q1 = df.quantile(0.1, axis=0, interpolation="linear")
assert q1["A"] == np.percentile(df["A"], 10)
tm.assert_series_equal(q, q1)
def test_quantile_multi(self, interp_method, request, using_array_manager):
interpolation, method = interp_method
df = DataFrame([[1, 1, 1], [2, 2, 2], [3, 3, 3]], columns=["a", "b", "c"])
result = df.quantile([0.25, 0.5], interpolation=interpolation, method=method)
expected = DataFrame(
[[1.5, 1.5, 1.5], [2.0, 2.0, 2.0]],
index=[0.25, 0.5],
columns=["a", "b", "c"],
)
if interpolation == "nearest":
expected = expected.astype(np.int64)
if method == "table" and using_array_manager:
request.node.add_marker(
pytest.mark.xfail(reason="Axis name incorrectly set.")
)
tm.assert_frame_equal(result, expected)
def test_quantile_multi_axis_1(self, interp_method, request, using_array_manager):
interpolation, method = interp_method
df = DataFrame([[1, 1, 1], [2, 2, 2], [3, 3, 3]], columns=["a", "b", "c"])
result = df.quantile(
[0.25, 0.5], axis=1, interpolation=interpolation, method=method
)
expected = DataFrame(
[[1.0, 2.0, 3.0]] * 2, index=[0.25, 0.5], columns=[0, 1, 2]
)
if interpolation == "nearest":
expected = expected.astype(np.int64)
if method == "table" and using_array_manager:
request.node.add_marker(
pytest.mark.xfail(reason="Axis name incorrectly set.")
)
tm.assert_frame_equal(result, expected)
def test_quantile_multi_empty(self, interp_method):
interpolation, method = interp_method
result = DataFrame({"x": [], "y": []}).quantile(
[0.1, 0.9], axis=0, interpolation=interpolation, method=method
)
expected = DataFrame(
{"x": [np.nan, np.nan], "y": [np.nan, np.nan]}, index=[0.1, 0.9]
)
tm.assert_frame_equal(result, expected)
def test_quantile_datetime(self):
df = DataFrame({"a": pd.to_datetime(["2010", "2011"]), "b": [0, 5]})
# exclude datetime
result = df.quantile(0.5, numeric_only=True)
expected = Series([2.5], index=["b"], name=0.5)
tm.assert_series_equal(result, expected)
# datetime
result = df.quantile(0.5, numeric_only=False)
expected = Series(
[Timestamp("2010-07-02 12:00:00"), 2.5], index=["a", "b"], name=0.5
)
tm.assert_series_equal(result, expected)
# datetime w/ multi
result = df.quantile([0.5], numeric_only=False)
expected = DataFrame(
[[Timestamp("2010-07-02 12:00:00"), 2.5]], index=[0.5], columns=["a", "b"]
)
tm.assert_frame_equal(result, expected)
# axis = 1
df["c"] = pd.to_datetime(["2011", "2012"])
result = df[["a", "c"]].quantile(0.5, axis=1, numeric_only=False)
expected = Series(
[Timestamp("2010-07-02 12:00:00"), Timestamp("2011-07-02 12:00:00")],
index=[0, 1],
name=0.5,
)
tm.assert_series_equal(result, expected)
result = df[["a", "c"]].quantile([0.5], axis=1, numeric_only=False)
expected = DataFrame(
[[Timestamp("2010-07-02 12:00:00"), Timestamp("2011-07-02 12:00:00")]],
index=[0.5],
columns=[0, 1],
)
tm.assert_frame_equal(result, expected)
# empty when numeric_only=True
result = df[["a", "c"]].quantile(0.5, numeric_only=True)
expected = Series([], index=[], dtype=np.float64, name=0.5)
tm.assert_series_equal(result, expected)
result = df[["a", "c"]].quantile([0.5], numeric_only=True)
expected = DataFrame(index=[0.5])
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"dtype",
[
"datetime64[ns]",
"datetime64[ns, US/Pacific]",
"timedelta64[ns]",
"Period[D]",
],
)
def test_quantile_dt64_empty(self, dtype, interp_method):
# GH#41544
interpolation, method = interp_method
df = DataFrame(columns=["a", "b"], dtype=dtype)
res = df.quantile(
0.5, axis=1, numeric_only=False, interpolation=interpolation, method=method
)
expected = Series([], index=[], name=0.5, dtype=dtype)
tm.assert_series_equal(res, expected)
# no columns in result, so no dtype preservation
res = df.quantile(
[0.5],
axis=1,
numeric_only=False,
interpolation=interpolation,
method=method,
)
expected = DataFrame(index=[0.5])
tm.assert_frame_equal(res, expected)
@pytest.mark.parametrize("invalid", [-1, 2, [0.5, -1], [0.5, 2]])
def test_quantile_invalid(self, invalid, datetime_frame, interp_method):
msg = "percentiles should all be in the interval \\[0, 1\\]"
interpolation, method = interp_method
with pytest.raises(ValueError, match=msg):
datetime_frame.quantile(invalid, interpolation=interpolation, method=method)
def test_quantile_box(self, interp_method, request, using_array_manager):
interpolation, method = interp_method
if method == "table" and using_array_manager:
request.node.add_marker(
pytest.mark.xfail(reason="Axis name incorrectly set.")
)
df = DataFrame(
{
"A": [
Timestamp("2011-01-01"),
Timestamp("2011-01-02"),
Timestamp("2011-01-03"),
],
"B": [
Timestamp("2011-01-01", tz="US/Eastern"),
Timestamp("2011-01-02", tz="US/Eastern"),
Timestamp("2011-01-03", tz="US/Eastern"),
],
"C": [
pd.Timedelta("1 days"),
pd.Timedelta("2 days"),
pd.Timedelta("3 days"),
],
}
)
res = df.quantile(
0.5, numeric_only=False, interpolation=interpolation, method=method
)
exp = Series(
[
Timestamp("2011-01-02"),
Timestamp("2011-01-02", tz="US/Eastern"),
pd.Timedelta("2 days"),
],
name=0.5,
index=["A", "B", "C"],
)
tm.assert_series_equal(res, exp)
res = df.quantile(
[0.5], numeric_only=False, interpolation=interpolation, method=method
)
exp = DataFrame(
[
[
Timestamp("2011-01-02"),
Timestamp("2011-01-02", tz="US/Eastern"),
pd.Timedelta("2 days"),
]
],
index=[0.5],
columns=["A", "B", "C"],
)
tm.assert_frame_equal(res, exp)
def test_quantile_box_nat(self):
# DatetimeLikeBlock may be consolidated and contain NaT in different loc
df = DataFrame(
{
"A": [
Timestamp("2011-01-01"),
pd.NaT,
Timestamp("2011-01-02"),
Timestamp("2011-01-03"),
],
"a": [
Timestamp("2011-01-01"),
Timestamp("2011-01-02"),
pd.NaT,
Timestamp("2011-01-03"),
],
"B": [
Timestamp("2011-01-01", tz="US/Eastern"),
pd.NaT,
Timestamp("2011-01-02", tz="US/Eastern"),
Timestamp("2011-01-03", tz="US/Eastern"),
],
"b": [
Timestamp("2011-01-01", tz="US/Eastern"),
Timestamp("2011-01-02", tz="US/Eastern"),
pd.NaT,
Timestamp("2011-01-03", tz="US/Eastern"),
],
"C": [
pd.Timedelta("1 days"),
pd.Timedelta("2 days"),
pd.Timedelta("3 days"),
pd.NaT,
],
"c": [
pd.NaT,
pd.Timedelta("1 days"),
pd.Timedelta("2 days"),
pd.Timedelta("3 days"),
],
},
columns=list("AaBbCc"),
)
res = df.quantile(0.5, numeric_only=False)
exp = Series(
[
Timestamp("2011-01-02"),
Timestamp("2011-01-02"),
Timestamp("2011-01-02", tz="US/Eastern"),
Timestamp("2011-01-02", tz="US/Eastern"),
pd.Timedelta("2 days"),
pd.Timedelta("2 days"),
],
name=0.5,
index=list("AaBbCc"),
)
tm.assert_series_equal(res, exp)
res = df.quantile([0.5], numeric_only=False)
exp = DataFrame(
[
[
Timestamp("2011-01-02"),
Timestamp("2011-01-02"),
Timestamp("2011-01-02", tz="US/Eastern"),
Timestamp("2011-01-02", tz="US/Eastern"),
pd.Timedelta("2 days"),
pd.Timedelta("2 days"),
]
],
index=[0.5],
columns=list("AaBbCc"),
)
tm.assert_frame_equal(res, exp)
def test_quantile_nan(self, interp_method, request, using_array_manager):
interpolation, method = interp_method
if method == "table" and using_array_manager:
request.node.add_marker(
pytest.mark.xfail(reason="Axis name incorrectly set.")
)
# GH 14357 - float block where some cols have missing values
df = DataFrame({"a": np.arange(1, 6.0), "b": np.arange(1, 6.0)})
df.iloc[-1, 1] = np.nan
res = df.quantile(0.5, interpolation=interpolation, method=method)
exp = Series(
[3.0, 2.5 if interpolation == "linear" else 3.0], index=["a", "b"], name=0.5
)
tm.assert_series_equal(res, exp)
res = df.quantile([0.5, 0.75], interpolation=interpolation, method=method)
exp = DataFrame(
{
"a": [3.0, 4.0],
"b": [2.5, 3.25] if interpolation == "linear" else [3.0, 4.0],
},
index=[0.5, 0.75],
)
tm.assert_frame_equal(res, exp)
res = df.quantile(0.5, axis=1, interpolation=interpolation, method=method)
exp = Series(np.arange(1.0, 6.0), name=0.5)
tm.assert_series_equal(res, exp)
res = df.quantile(
[0.5, 0.75], axis=1, interpolation=interpolation, method=method
)
exp = DataFrame([np.arange(1.0, 6.0)] * 2, index=[0.5, 0.75])
if interpolation == "nearest":
exp.iloc[1, -1] = np.nan
tm.assert_frame_equal(res, exp)
# full-nan column
df["b"] = np.nan
res = df.quantile(0.5, interpolation=interpolation, method=method)
exp = Series([3.0, np.nan], index=["a", "b"], name=0.5)
tm.assert_series_equal(res, exp)
res = df.quantile([0.5, 0.75], interpolation=interpolation, method=method)
exp = DataFrame({"a": [3.0, 4.0], "b": [np.nan, np.nan]}, index=[0.5, 0.75])
tm.assert_frame_equal(res, exp)
def test_quantile_nat(self, interp_method, request, using_array_manager):
interpolation, method = interp_method
if method == "table" and using_array_manager:
request.node.add_marker(
pytest.mark.xfail(reason="Axis name incorrectly set.")
)
# full NaT column
df = DataFrame({"a": [pd.NaT, pd.NaT, pd.NaT]})
res = df.quantile(
0.5, numeric_only=False, interpolation=interpolation, method=method
)
exp = Series([pd.NaT], index=["a"], name=0.5)
tm.assert_series_equal(res, exp)
res = df.quantile(
[0.5], numeric_only=False, interpolation=interpolation, method=method
)
exp = DataFrame({"a": [pd.NaT]}, index=[0.5])
tm.assert_frame_equal(res, exp)
# mixed non-null / full null column
df = DataFrame(
{
"a": [
Timestamp("2012-01-01"),
Timestamp("2012-01-02"),
Timestamp("2012-01-03"),
],
"b": [pd.NaT, pd.NaT, pd.NaT],
}
)
res = df.quantile(
0.5, numeric_only=False, interpolation=interpolation, method=method
)
exp = Series([Timestamp("2012-01-02"), pd.NaT], index=["a", "b"], name=0.5)
tm.assert_series_equal(res, exp)
res = df.quantile(
[0.5], numeric_only=False, interpolation=interpolation, method=method
)
exp = DataFrame(
[[Timestamp("2012-01-02"), pd.NaT]], index=[0.5], columns=["a", "b"]
)
tm.assert_frame_equal(res, exp)
def test_quantile_empty_no_rows_floats(self, interp_method):
interpolation, method = interp_method
df = DataFrame(columns=["a", "b"], dtype="float64")
res = df.quantile(0.5, interpolation=interpolation, method=method)
exp = Series([np.nan, np.nan], index=["a", "b"], name=0.5)
tm.assert_series_equal(res, exp)
res = df.quantile([0.5], interpolation=interpolation, method=method)
exp = DataFrame([[np.nan, np.nan]], columns=["a", "b"], index=[0.5])
tm.assert_frame_equal(res, exp)
res = df.quantile(0.5, axis=1, interpolation=interpolation, method=method)
exp = Series([], index=[], dtype="float64", name=0.5)
tm.assert_series_equal(res, exp)
res = df.quantile([0.5], axis=1, interpolation=interpolation, method=method)
exp = DataFrame(columns=[], index=[0.5])
tm.assert_frame_equal(res, exp)
def test_quantile_empty_no_rows_ints(self, interp_method):
interpolation, method = interp_method
df = DataFrame(columns=["a", "b"], dtype="int64")
res = df.quantile(0.5, interpolation=interpolation, method=method)
exp = Series([np.nan, np.nan], index=["a", "b"], name=0.5)
tm.assert_series_equal(res, exp)
@pytest.mark.filterwarnings(
"ignore:The behavior of DatetimeArray._from_sequence:FutureWarning"
)
def test_quantile_empty_no_rows_dt64(self, interp_method):
interpolation, method = interp_method
# datetimes
df = DataFrame(columns=["a", "b"], dtype="datetime64[ns]")
res = df.quantile(
0.5, numeric_only=False, interpolation=interpolation, method=method
)
exp = Series(
[pd.NaT, pd.NaT], index=["a", "b"], dtype="datetime64[ns]", name=0.5
)
tm.assert_series_equal(res, exp)
# Mixed dt64/dt64tz
df["a"] = df["a"].dt.tz_localize("US/Central")
res = df.quantile(
0.5, numeric_only=False, interpolation=interpolation, method=method
)
exp = exp.astype(object)
tm.assert_series_equal(res, exp)
# both dt64tz
df["b"] = df["b"].dt.tz_localize("US/Central")
res = df.quantile(
0.5, numeric_only=False, interpolation=interpolation, method=method
)
exp = exp.astype(df["b"].dtype)
tm.assert_series_equal(res, exp)
def test_quantile_empty_no_columns(self, interp_method):
# GH#23925 _get_numeric_data may drop all columns
interpolation, method = interp_method
df = DataFrame(pd.date_range("1/1/18", periods=5))
df.columns.name = "captain tightpants"
result = df.quantile(
0.5, numeric_only=True, interpolation=interpolation, method=method
)
expected = Series([], index=[], name=0.5, dtype=np.float64)
expected.index.name = "captain tightpants"
tm.assert_series_equal(result, expected)
result = df.quantile(
[0.5], numeric_only=True, interpolation=interpolation, method=method
)
expected = DataFrame([], index=[0.5], columns=[])
expected.columns.name = "captain tightpants"
tm.assert_frame_equal(result, expected)
def test_quantile_item_cache(self, using_array_manager, interp_method):
        # previously, an invalid _item_cache entry was incorrectly retained
interpolation, method = interp_method
df = DataFrame(np.random.randn(4, 3), columns=["A", "B", "C"])
df["D"] = df["A"] * 2
ser = df["A"]
if not using_array_manager:
assert len(df._mgr.blocks) == 2
df.quantile(numeric_only=False, interpolation=interpolation, method=method)
ser.values[0] = 99
assert df.iloc[0, 0] == df["A"][0]
def test_invalid_method(self):
with pytest.raises(ValueError, match="Invalid method: foo"):
DataFrame(range(1)).quantile(0.5, method="foo")
def test_table_invalid_interpolation(self):
with pytest.raises(ValueError, match="Invalid interpolation: foo"):
DataFrame(range(1)).quantile(0.5, method="table", interpolation="foo")
class TestQuantileExtensionDtype:
# TODO: tests for axis=1?
# TODO: empty case?
@pytest.fixture(
params=[
pytest.param(
pd.IntervalIndex.from_breaks(range(10)),
marks=pytest.mark.xfail(reason="raises when trying to add Intervals"),
),
pd.period_range("2016-01-01", periods=9, freq="D"),
pd.date_range("2016-01-01", periods=9, tz="US/Pacific"),
pd.timedelta_range("1 Day", periods=9),
pd.array(np.arange(9), dtype="Int64"),
pd.array(np.arange(9), dtype="Float64"),
],
ids=lambda x: str(x.dtype),
)
def index(self, request):
# NB: not actually an Index object
idx = request.param
idx.name = "A"
return idx
@pytest.fixture
def obj(self, index, frame_or_series):
# bc index is not always an Index (yet), we need to re-patch .name
obj = frame_or_series(index).copy()
if frame_or_series is Series:
obj.name = "A"
else:
obj.columns = ["A"]
return obj
def compute_quantile(self, obj, qs):
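        # Series.quantile takes no numeric_only keyword, so it is passed only
        # for DataFrames.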
if isinstance(obj, Series):
result = obj.quantile(qs)
else:
result = obj.quantile(qs, numeric_only=False)
return result
def test_quantile_ea(self, request, obj, index):
# result should be invariant to shuffling
indexer = np.arange(len(index), dtype=np.intp)
np.random.shuffle(indexer)
obj = obj.iloc[indexer]
qs = [0.5, 0, 1]
result = self.compute_quantile(obj, qs)
if np_version_under1p21 and index.dtype == "timedelta64[ns]":
msg = "failed on Numpy 1.20.3; TypeError: data type 'Int64' not understood"
mark = pytest.mark.xfail(reason=msg, raises=TypeError)
request.node.add_marker(mark)
exp_dtype = index.dtype
if index.dtype == "Int64":
# match non-nullable casting behavior
exp_dtype = "Float64"
# expected here assumes len(index) == 9
expected = Series(
[index[4], index[0], index[-1]], dtype=exp_dtype, index=qs, name="A"
)
expected = type(obj)(expected)
tm.assert_equal(result, expected)
def test_quantile_ea_with_na(self, obj, index):
obj.iloc[0] = index._na_value
obj.iloc[-1] = index._na_value
# result should be invariant to shuffling
indexer = np.arange(len(index), dtype=np.intp)
np.random.shuffle(indexer)
obj = obj.iloc[indexer]
qs = [0.5, 0, 1]
result = self.compute_quantile(obj, qs)
# expected here assumes len(index) == 9
expected = Series(
[index[4], index[1], index[-2]], dtype=index.dtype, index=qs, name="A"
)
expected = type(obj)(expected)
tm.assert_equal(result, expected)
# TODO(GH#39763): filtering can be removed after GH#39763 is fixed
@pytest.mark.filterwarnings("ignore:Using .astype to convert:FutureWarning")
def test_quantile_ea_all_na(self, request, obj, index):
obj.iloc[:] = index._na_value
# TODO(ArrayManager): this casting should be unnecessary after GH#39763 is fixed
obj = obj.astype(index.dtype)
assert np.all(obj.dtypes == index.dtype)
# result should be invariant to shuffling
indexer = np.arange(len(index), dtype=np.intp)
np.random.shuffle(indexer)
obj = obj.iloc[indexer]
qs = [0.5, 0, 1]
result = self.compute_quantile(obj, qs)
if np_version_under1p21 and index.dtype == "timedelta64[ns]":
msg = "failed on Numpy 1.20.3; TypeError: data type 'Int64' not understood"
mark = pytest.mark.xfail(reason=msg, raises=TypeError)
request.node.add_marker(mark)
expected = index.take([-1, -1, -1], allow_fill=True, fill_value=index._na_value)
expected = Series(expected, index=qs, name="A")
if expected.dtype == "Int64":
expected = expected.astype("Float64")
expected = type(obj)(expected)
tm.assert_equal(result, expected)
def test_quantile_ea_scalar(self, request, obj, index):
# scalar qs
# result should be invariant to shuffling
indexer = np.arange(len(index), dtype=np.intp)
np.random.shuffle(indexer)
obj = obj.iloc[indexer]
qs = 0.5
result = self.compute_quantile(obj, qs)
if np_version_under1p21 and index.dtype == "timedelta64[ns]":
msg = "failed on Numpy 1.20.3; TypeError: data type 'Int64' not understood"
mark = pytest.mark.xfail(reason=msg, raises=TypeError)
request.node.add_marker(mark)
exp_dtype = index.dtype
if index.dtype == "Int64":
exp_dtype = "Float64"
expected = Series({"A": index[4]}, dtype=exp_dtype, name=0.5)
if isinstance(obj, Series):
expected = expected["A"]
assert result == expected
else:
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"dtype, expected_data, expected_index, axis",
[
["float64", [], [], 1],
["int64", [], [], 1],
["float64", [np.nan, np.nan], ["a", "b"], 0],
["int64", [np.nan, np.nan], ["a", "b"], 0],
],
)
def test_empty_numeric(self, dtype, expected_data, expected_index, axis):
# GH 14564
df = DataFrame(columns=["a", "b"], dtype=dtype)
result = df.quantile(0.5, axis=axis)
expected = Series(
expected_data, name=0.5, index=Index(expected_index), dtype="float64"
)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"dtype, expected_data, expected_index, axis, expected_dtype",
[
["datetime64[ns]", [], [], 1, "datetime64[ns]"],
["datetime64[ns]", [pd.NaT, pd.NaT], ["a", "b"], 0, "datetime64[ns]"],
],
)
def test_empty_datelike(
self, dtype, expected_data, expected_index, axis, expected_dtype
):
# GH 14564
df = DataFrame(columns=["a", "b"], dtype=dtype)
result = df.quantile(0.5, axis=axis, numeric_only=False)
expected = Series(
expected_data, name=0.5, index=Index(expected_index), dtype=expected_dtype
)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"expected_data, expected_index, axis",
[
[[np.nan, np.nan], range(2), 1],
[[], [], 0],
],
)
def test_datelike_numeric_only(self, expected_data, expected_index, axis):
# GH 14564
df = DataFrame(
{
"a": pd.to_datetime(["2010", "2011"]),
"b": [0, 5],
"c": pd.to_datetime(["2011", "2012"]),
}
)
result = df[["a", "c"]].quantile(0.5, axis=axis, numeric_only=True)
expected = Series(
expected_data, name=0.5, index=Index(expected_index), dtype=np.float64
)
tm.assert_series_equal(result, expected)
|
{
"content_hash": "9765b412ef9c425ff2ce90752b8a1b69",
"timestamp": "",
"source": "github",
"line_count": 1030,
"max_line_length": 88,
"avg_line_length": 36.95825242718447,
"alnum_prop": 0.5346888381012426,
"repo_name": "datapythonista/pandas",
"id": "3beb201bcfa05599ee11fd9773a4a36e17b9fc51",
"size": "38067",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "pandas/tests/frame/methods/test_quantile.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "131"
},
{
"name": "C",
"bytes": "355524"
},
{
"name": "CSS",
"bytes": "1662"
},
{
"name": "Cython",
"bytes": "1178139"
},
{
"name": "Dockerfile",
"bytes": "1933"
},
{
"name": "HTML",
"bytes": "456449"
},
{
"name": "Makefile",
"bytes": "505"
},
{
"name": "Python",
"bytes": "19048364"
},
{
"name": "Shell",
"bytes": "10511"
},
{
"name": "Smarty",
"bytes": "8486"
},
{
"name": "XSLT",
"bytes": "1196"
}
],
"symlink_target": ""
}
|
"""
WSGI config for TaskTracker project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "TaskTracker.settings")
application = get_wsgi_application()
|
{
"content_hash": "2cbb9a7dcae819f2dfd92754d3d385b0",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 78,
"avg_line_length": 24.9375,
"alnum_prop": 0.7744360902255639,
"repo_name": "polarkac/TaskTracker",
"id": "e71ff8f81478068005a22e66d5d13baefed793ed",
"size": "399",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "TaskTracker/wsgi.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "472"
},
{
"name": "HTML",
"bytes": "9260"
},
{
"name": "Python",
"bytes": "34344"
}
],
"symlink_target": ""
}
|
from pandac.PandaModules import *
from toontown.toonbase.ToonBaseGlobal import *
from toontown.toonbase.ToontownGlobals import GlobalDialogColor
from DistributedMinigame import *
from direct.fsm import ClassicFSM, State
from direct.fsm import State
from toontown.toonbase import TTLocalizer
from toontown.toonbase import ToontownTimer
import TravelGameGlobals
import math
from pandac.PandaModules import rad2Deg
from toontown.toontowngui import TTDialog
from direct.interval.IntervalGlobal import *
import VoteResultsPanel
import VoteResultsTrolleyPanel
IconDict = {ToontownGlobals.RaceGameId: 'mg_trolley_sign_race',
ToontownGlobals.CannonGameId: 'mg_trolley_sign_cannon',
ToontownGlobals.TagGameId: 'mg_trolley_sign_tag',
ToontownGlobals.PatternGameId: 'mg_trolley_sign_minnie',
ToontownGlobals.RingGameId: 'mg_trolley_sign_ring',
ToontownGlobals.MazeGameId: 'mg_trolley_sign_maze',
ToontownGlobals.TugOfWarGameId: 'mg_trolley_sign_tugawar',
ToontownGlobals.CatchGameId: 'mg_trolley_sign_catch',
ToontownGlobals.DivingGameId: 'mg_trolley_sign_dive',
ToontownGlobals.TargetGameId: 'mg_trolley_sign_umbrella',
ToontownGlobals.PairingGameId: 'mg_trolley_sign_card',
ToontownGlobals.VineGameId: 'mg_trolley_sign_vine',
ToontownGlobals.IceGameId: 'mg_trolley_sign_ice',
ToontownGlobals.PhotoGameId: 'mg_trolley_sign_photo',
ToontownGlobals.TwoDGameId: 'mg_trolley_sign_2d',
ToontownGlobals.CogThiefGameId: 'mg_trolley_sign_theif'}
MinigameNameDict = {ToontownGlobals.RaceGameId: TTLocalizer.RaceGameTitle,
ToontownGlobals.CannonGameId: TTLocalizer.CannonGameTitle,
ToontownGlobals.TagGameId: TTLocalizer.TagGameTitle,
ToontownGlobals.PatternGameId: TTLocalizer.PatternGameTitle,
ToontownGlobals.RingGameId: TTLocalizer.RingGameTitle,
ToontownGlobals.MazeGameId: TTLocalizer.MazeGameTitle,
ToontownGlobals.TugOfWarGameId: TTLocalizer.TugOfWarGameTitle,
ToontownGlobals.CatchGameId: TTLocalizer.CatchGameTitle,
ToontownGlobals.DivingGameId: TTLocalizer.DivingGameTitle,
ToontownGlobals.TargetGameId: TTLocalizer.TargetGameTitle,
ToontownGlobals.PairingGameId: TTLocalizer.PairingGameTitle,
ToontownGlobals.VineGameId: TTLocalizer.VineGameTitle,
ToontownGlobals.TravelGameId: TTLocalizer.TravelGameTitle,
ToontownGlobals.IceGameId: TTLocalizer.IceGameTitle,
ToontownGlobals.PhotoGameId: TTLocalizer.PhotoGameTitle,
ToontownGlobals.TwoDGameId: TTLocalizer.TwoDGameTitle,
ToontownGlobals.CogThiefGameId: TTLocalizer.CogThiefGameTitle}
def makeLabel(itemName, itemNum, *extraArgs):
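    # Color-code the label by the vote value's sign (blue for negative, black
    # for zero, red for positive) and display its absolute value.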
intVersion = int(itemName)
if intVersion < 0:
textColor = Vec4(0, 0, 1, 1)
intVersion = -intVersion
elif intVersion == 0:
textColor = Vec4(0, 0, 0, 1)
else:
textColor = Vec4(1, 0, 0, 1)
return DirectLabel(text=str(intVersion), text_fg=textColor, relief=DGG.RIDGE, frameSize=(-1.2,
1.2,
-0.225,
0.8), scale=1.0)
def map3dToAspect2d(node, point):
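    # Project a point from node-space through the camera lens into aspect2d
    # coordinates; returns None when the point falls outside the lens frustum.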
p3 = base.cam.getRelativePoint(node, point)
p2 = Point2()
if not base.camLens.project(p3, p2):
return None
r2d = Point3(p2[0], 0, p2[1])
a2d = aspect2d.getRelativePoint(render2d, r2d)
return a2d
def invertTable(table):
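    # Build the inverse mapping (value -> key); when values collide, the first
    # key encountered wins.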
index = {}
for key in table.keys():
value = table[key]
        if value not in index:
index[value] = key
return index
class DistributedTravelGame(DistributedMinigame):
notify = directNotify.newCategory('DistributedTravelGame')
idToNames = MinigameNameDict
TrolleyMoveDuration = 3
UseTrolleyResultsPanel = True
FlyCameraUp = True
FocusOnTrolleyWhileMovingUp = False
def __init__(self, cr):
DistributedMinigame.__init__(self, cr)
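        # Game flow: collect vote input, wait for the server tally, display the
        # votes, then move the trolley; repeat until the win movie or cleanup.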
self.gameFSM = ClassicFSM.ClassicFSM('DistributedTravelGame', [State.State('off', self.enterOff, self.exitOff, ['inputChoice']),
State.State('inputChoice', self.enterInputChoice, self.exitInputChoice, ['waitServerChoices', 'displayVotes', 'cleanup']),
State.State('waitServerChoices', self.enterWaitServerChoices, self.exitWaitServerChoices, ['displayVotes', 'cleanup']),
State.State('displayVotes', self.enterDisplayVotes, self.exitDisplayVotes, ['moveTrolley', 'cleanup']),
State.State('moveTrolley', self.enterMoveTrolley, self.exitMoveTrolley, ['inputChoice', 'winMovie', 'cleanup']),
State.State('winMovie', self.enterWinMovie, self.exitWinMovie, ['cleanup']),
State.State('cleanup', self.enterCleanup, self.exitCleanup, [])], 'off', 'cleanup')
self.addChildGameFSM(self.gameFSM)
self.currentVotes = {}
self.cameraTopView = (100, -20, 280, 0, -89, 0)
self.timer = None
self.timerStartTime = None
self.currentSwitch = 0
self.destSwitch = 0
self.minigameLabels = []
self.minigameIcons = []
self.bonusLabels = []
self.trolleyAwaySfx = base.loadSfx('phase_4/audio/sfx/SZ_trolley_away.mp3')
self.trolleyBellSfx = base.loadSfx('phase_4/audio/sfx/SZ_trolley_bell.mp3')
self.turntableRotateSfx = base.loadSfx('phase_4/audio/sfx/MG_sfx_travel_game_turntble_rotate_2.mp3')
self.wonGameSfx = base.loadSfx('phase_4/audio/sfx/MG_sfx_travel_game_bonus.mp3')
self.lostGameSfx = base.loadSfx('phase_4/audio/sfx/MG_sfx_travel_game_no_bonus_2.mp3')
self.noWinnerSfx = base.loadSfx('phase_4/audio/sfx/MG_sfx_travel_game_no_bonus.mp3')
self.boardIndex = 0
self.avNames = []
self.disconnectedAvIds = []
return
def getTitle(self):
return TTLocalizer.TravelGameTitle
def getInstructions(self):
return TTLocalizer.TravelGameInstructions
def getMaxDuration(self):
return 0
def load(self):
self.notify.debug('load')
DistributedMinigame.load(self)
self.sky = loader.loadModel('phase_3.5/models/props/TT_sky')
self.gameBoard = loader.loadModel('phase_4/models/minigames/toon_cannon_gameground')
self.gameBoard.setPosHpr(100, 0, 0, 0, 0, 0)
self.gameBoard.setScale(1.0)
station = loader.loadModel('phase_4/models/modules/trolley_station_TT.bam')
self.trolleyCar = station.find('**/trolley_car')
self.trolleyCar.reparentTo(hidden)
self.trolleyCarOrigPos = self.trolleyCar.getPos()
self.trolleyCarOrigHpr = self.trolleyCar.getHpr()
self.trolleyCar.setPosHpr(0, 0, 0, 0, 0, 0)
self.trolleyCar.setScale(1.0)
self.trolleyCar.setX(self.trolleyCar.getX() - TravelGameGlobals.xInc)
station.removeNode()
self.keys = self.trolleyCar.findAllMatches('**/key')
self.numKeys = self.keys.getNumPaths()
self.keyInit = []
self.keyRef = []
for i in range(self.numKeys):
key = self.keys[i]
key.setTwoSided(1)
            ref = self.trolleyCar.attachNewNode('key' + str(i) + 'ref')
ref.iPosHpr(key)
self.keyRef.append(ref)
self.keyInit.append(key.getTransform())
self.frontWheels = self.trolleyCar.findAllMatches('**/front_wheels')
self.numFrontWheels = self.frontWheels.getNumPaths()
self.frontWheelInit = []
self.frontWheelRef = []
for i in range(self.numFrontWheels):
wheel = self.frontWheels[i]
            ref = self.trolleyCar.attachNewNode('frontWheel' + str(i) + 'ref')
ref.iPosHpr(wheel)
self.frontWheelRef.append(ref)
self.frontWheelInit.append(wheel.getTransform())
self.backWheels = self.trolleyCar.findAllMatches('**/back_wheels')
self.numBackWheels = self.backWheels.getNumPaths()
self.backWheelInit = []
self.backWheelRef = []
for i in range(self.numBackWheels):
wheel = self.backWheels[i]
            ref = self.trolleyCar.attachNewNode('backWheel' + str(i) + 'ref')
ref.iPosHpr(wheel)
self.backWheelRef.append(ref)
self.backWheelInit.append(wheel.getTransform())
trolleyAnimationReset = Func(self.resetAnimation)
self.trainSwitches = {}
self.trainTracks = {}
self.tunnels = {}
self.extraTrainTracks = []
turnTable = loader.loadModel('phase_4/models/minigames/trolley_game_turntable')
minPoint = Point3(0, 0, 0)
maxPoint = Point3(0, 0, 0)
turnTable.calcTightBounds(minPoint, maxPoint)
self.fullLength = maxPoint[0]
for key in TravelGameGlobals.BoardLayouts[self.boardIndex].keys():
info = TravelGameGlobals.BoardLayouts[self.boardIndex][key]
switchModel = turnTable.find('**/turntable1').copyTo(render)
switchModel.setPos(*info['pos'])
switchModel.reparentTo(hidden)
self.trainSwitches[key] = switchModel
zAdj = 0
for otherSwitch in info['links']:
info2 = TravelGameGlobals.BoardLayouts[self.boardIndex][otherSwitch]
x1, y1, z1 = info['pos']
x2, y2, z2 = info2['pos']
linkKey = (key, otherSwitch)
trainTrack = self.loadTrainTrack(x1, y1, x2, y2, zAdj)
trainTrack.reparentTo(hidden)
self.trainTracks[linkKey] = trainTrack
zAdj += 0.005
rootInfo = TravelGameGlobals.BoardLayouts[self.boardIndex][0]
rootX, rootY, rootZ = rootInfo['pos']
startX = rootX - TravelGameGlobals.xInc
trainTrack = self.loadTrainTrack(startX, rootY, rootX, rootY)
self.extraTrainTracks.append(trainTrack)
tunnelX = None
for key in TravelGameGlobals.BoardLayouts[self.boardIndex].keys():
if self.isLeaf(key):
info = TravelGameGlobals.BoardLayouts[self.boardIndex][key]
switchX, switchY, switchZ = info['pos']
endX = switchX + TravelGameGlobals.xInc
trainTrack = self.loadTrainTrack(switchX, switchY, endX, switchY)
self.extraTrainTracks.append(trainTrack)
tempModel = loader.loadModel('phase_4/models/minigames/trolley_game_turntable')
tunnel = tempModel.find('**/tunnel1')
tunnel.reparentTo(render)
tempModel.removeNode()
if not tunnelX:
minTrackPoint = Point3(0, 0, 0)
maxTrackPoint = Point3(0, 0, 0)
trainTrack.calcTightBounds(minTrackPoint, maxTrackPoint)
tunnelX = maxTrackPoint[0]
tunnel.setPos(tunnelX, switchY, 0)
tunnel.wrtReparentTo(trainTrack)
self.tunnels[key] = tunnel
turnTable.removeNode()
self.loadGui()
self.introMovie = self.getIntroMovie()
self.music = base.loadMusic('phase_4/audio/bgm/MG_Travel.mid')
self.flashWinningBeansTrack = None
return
def loadTrainTrack(self, x1, y1, x2, y2, zAdj = 0):
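        # Lay a straight run of track between two switches: tile copies of the
        # rail model along the node's local X axis, then rotate the node to the
        # segment's heading. Assumes x2 != x1 (the board layouts contain no
        # vertical segments).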
turnTable = loader.loadModel('phase_4/models/minigames/trolley_game_turntable')
trainPart = turnTable.find('**/track_a2')
trackHeight = 0.03
trainTrack = render.attachNewNode('trainTrack%d%d%d%d' % (x1,
y1,
x2,
y2))
trainTrack.setPos(x1, y1, trackHeight)
xDiff = abs(x2 - x1)
yDiff = abs(y2 - y1)
angleInRadians = math.atan((float(y2) - y1) / (x2 - x1))
angle = rad2Deg(angleInRadians)
desiredLength = math.sqrt(xDiff * xDiff + yDiff * yDiff)
lengthToGo = desiredLength
partIndex = 0
lengthCovered = 0
while lengthToGo > self.fullLength / 2.0:
onePart = trainPart.copyTo(trainTrack)
onePart.setX(lengthCovered)
lengthToGo -= self.fullLength
lengthCovered += self.fullLength
trainTrack.setH(angle)
newX = x1 + (x2 - x1) / 2.0
newY = y1 + (y2 - y1) / 2.0
trainTrack.setPos(x1, y1, trackHeight + zAdj)
turnTable.removeNode()
return trainTrack
def loadGui(self):
scoreText = [str(self.currentVotes[self.localAvId])]
self.gui = DirectFrame()
self.remainingVotesFrame = DirectFrame(parent=self.gui, relief=None, geom=DGG.getDefaultDialogGeom(), geom_color=GlobalDialogColor, geom_scale=(7, 1, 1), pos=(-0.9, 0, 0.8), scale=0.1, text=TTLocalizer.TravelGameRemainingVotes, text_align=TextNode.ALeft, text_scale=TTLocalizer.DTGremainingVotesFrame, text_pos=(-3.4, -0.1, 0.0))
self.localVotesRemaining = DirectLabel(parent=self.remainingVotesFrame, relief=None, text=scoreText, text_fg=VBase4(0, 0.5, 0, 1), text_align=TextNode.ARight, text_scale=0.7, pos=(3.2, 0, -0.15))
guiModel = loader.loadModel('phase_3.5/models/gui/friendslist_gui')
self.choiceFrame = DirectFrame(parent=self.gui, relief=None, pos=(-0.55, 0, -0.85), image=DGG.getDefaultDialogGeom(), image_scale=(1.4, 1, 0.225), image_color=GlobalDialogColor)
self.useLabel = DirectLabel(text=TTLocalizer.TravelGameUse, parent=self.choiceFrame, pos=(-0.59, 0, -0.01), text_scale=TTLocalizer.DTGuseLabel, relief=None)
self.votesPeriodLabel = DirectLabel(text=TTLocalizer.TravelGameVotesWithPeriod, parent=self.choiceFrame, pos=(-0.21, 0, -0.01), text_scale=TTLocalizer.DTGvotesPeriodLabel, relief=None, text_align=TextNode.ALeft)
self.votesToGoLabel = DirectLabel(text=TTLocalizer.TravelGameVotesToGo, parent=self.choiceFrame, pos=(-0.21, 0, -0.01), text_scale=TTLocalizer.DTGvotesToGoLabel, relief=None, text_align=TextNode.ALeft)
self.upLabel = DirectLabel(text=TTLocalizer.TravelGameUp, parent=self.choiceFrame, pos=(0.31, 0, -0.01), text_scale=TTLocalizer.DTGupLabel, text_fg=Vec4(0, 0, 1, 1), relief=None, text_align=TextNode.ALeft)
self.downLabel = DirectLabel(text=TTLocalizer.TravelGameDown, parent=self.choiceFrame, pos=(0.31, 0, -0.01), text_scale=TTLocalizer.DTGdownLabel, text_fg=Vec4(1, 0, 0, 1), relief=None, text_align=TextNode.ALeft)
self.scrollList = DirectScrolledList(parent=self.choiceFrame, relief=None, pos=(-0.36, 0, -0.02), incButton_image=(guiModel.find('**/FndsLst_ScrollUp'),
guiModel.find('**/FndsLst_ScrollDN'),
guiModel.find('**/FndsLst_ScrollUp_Rllvr'),
guiModel.find('**/FndsLst_ScrollUp')), incButton_relief=None, incButton_pos=(0.0, 0.0, -0.04), incButton_image3_color=Vec4(0.6, 0.6, 0.6, 0.6), incButton_scale=(1.0, 1.0, -1.0), decButton_image=(guiModel.find('**/FndsLst_ScrollUp'),
guiModel.find('**/FndsLst_ScrollDN'),
guiModel.find('**/FndsLst_ScrollUp_Rllvr'),
guiModel.find('**/FndsLst_ScrollUp')), decButton_relief=None, decButton_pos=(0.0, 0.0, 0.095), decButton_image3_color=Vec4(0.6, 0.6, 0.6, 0.6), itemFrame_pos=(0.0, 0.0, 0.0), itemFrame_relief=DGG.GROOVE, numItemsVisible=1, itemMakeFunction=makeLabel, items=[], scrollSpeed=3.0, itemFrame_scale=0.1, command=self.scrollChoiceChanged)
self.putChoicesInScrollList()
buttons = loader.loadModel('phase_3/models/gui/dialog_box_buttons_gui')
okImageList = (buttons.find('**/ChtBx_OKBtn_UP'), buttons.find('**/ChtBx_OKBtn_DN'), buttons.find('**/ChtBx_OKBtn_Rllvr'))
self.voteButton = DirectButton(parent=self.choiceFrame, relief=None, image=okImageList, image_scale=3.0, pos=(0.85, 0, 0.0), text=TTLocalizer.TravelGameVoteWithExclamation, text_scale=TTLocalizer.DTGvoteButton, text_pos=(0, 0), command=self.handleInputChoice)
self.waitingChoicesLabel = DirectLabel(text=TTLocalizer.TravelGameWaitingChoices, text_fg=VBase4(1, 1, 1, 1), relief=None, pos=(-0.2, 0, -0.85), scale=0.075)
self.waitingChoicesLabel.hide()
self.gui.hide()
return
def unload(self):
self.notify.debug('unload')
DistributedMinigame.unload(self)
self.introMovie.finish()
del self.introMovie
self.gameBoard.removeNode()
del self.gameBoard
self.sky.removeNode()
del self.sky
self.trolleyCar.removeNode()
del self.trolleyCar
for key in self.trainSwitches.keys():
self.trainSwitches[key].removeNode()
del self.trainSwitches[key]
self.trainSwitches = {}
for key in self.tunnels.keys():
self.tunnels[key].removeNode()
del self.tunnels[key]
self.tunnels = {}
for key in self.trainTracks.keys():
self.trainTracks[key].removeNode()
del self.trainTracks[key]
self.trainTracks = {}
for trainTrack in self.extraTrainTracks:
trainTrack.removeNode()
del trainTrack
self.extraTrainTracks = []
self.gui.removeNode()
del self.gui
self.waitingChoicesLabel.destroy()
del self.waitingChoicesLabel
if self.flashWinningBeansTrack:
self.flashWinningBeansTrack.finish()
del self.flashWinningBeansTrack
for label in self.minigameLabels:
label.destroy()
del label
self.minigameLabels = []
for icon in self.minigameIcons:
icon.destroy()
icon.removeNode()
self.minigameIcons = []
if hasattr(self, 'mg_icons'):
del self.mg_icons
for label in self.bonusLabels:
label.destroy()
del label
self.bonusLabels = []
self.scrollList.destroy()
del self.scrollList
self.voteButton.destroy()
del self.voteButton
self.removeChildGameFSM(self.gameFSM)
del self.gameFSM
del self.music
def moveCameraToTop(self):
camera.reparentTo(render)
p = self.cameraTopView
camera.setPosHpr(p[0], p[1], p[2], p[3], p[4], p[5])
def moveCameraToTrolley(self):
camera.reparentTo(self.trolleyCar)
camera.setPos(-25, 0, 7.5)
camera.setHpr(-90, 0, 0)
def onstage(self):
self.notify.debug('onstage')
NametagGlobals.setOnscreenChatForced(1)
DistributedMinigame.onstage(self)
self.gameBoard.reparentTo(render)
self.sky.reparentTo(render)
self.moveCameraToTop()
self.trolleyCar.reparentTo(render)
for key in self.trainSwitches.keys():
self.trainSwitches[key].reparentTo(render)
for key in self.trainTracks.keys():
self.trainTracks[key].reparentTo(render)
for trainTrack in self.extraTrainTracks:
trainTrack.reparentTo(render)
base.transitions.irisIn(0.4)
base.setBackgroundColor(0.1875, 0.7929, 0)
base.playMusic(self.music, looping=1, volume=0.9)
self.introMovie.start()
def offstage(self):
self.notify.debug('offstage')
NametagGlobals.setOnscreenChatForced(0)
base.setBackgroundColor(ToontownGlobals.DefaultBackgroundColor)
self.introMovie.finish()
self.gameBoard.hide()
self.sky.hide()
self.trolleyCar.hide()
self.gui.hide()
self.hideMinigamesAndBonuses()
for key in self.trainSwitches.keys():
self.trainSwitches[key].hide()
for key in self.trainTracks.keys():
self.trainTracks[key].hide()
for trainTrack in self.extraTrainTracks:
trainTrack.hide()
DistributedMinigame.offstage(self)
if base.localAvatar.laffMeter:
base.localAvatar.laffMeter.start()
self.music.stop()
def setGameReady(self):
if not self.hasLocalToon:
return
self.notify.debug('setGameReady')
if DistributedMinigame.setGameReady(self):
return
for index in range(self.numPlayers):
avId = self.avIdList[index]
name = ''
avatar = self.getAvatar(avId)
if avatar:
avatar.reparentTo(self.trolleyCar)
avatar.animFSM.request('Sit')
avatar.setPosHpr(-4, -4.5 + index * 3, 2.8, 90, 0, 0)
name = avatar.getName()
self.avNames.append(name)
self.trolleyCar.setH(90)
def setGameStart(self, timestamp):
if not self.hasLocalToon:
return
self.notify.debug('setGameStart')
DistributedMinigame.setGameStart(self, timestamp)
self.introMovie.finish()
self.gameFSM.request('inputChoice')
def enterOff(self):
self.notify.debug('enterOff')
def exitOff(self):
pass
def enterInputChoice(self):
self.notify.debug('enterInputChoice')
NametagGlobals.setOnscreenChatForced(1)
self.timer = ToontownTimer.ToontownTimer()
self.timer.hide()
if self.timerStartTime != None:
self.startTimer()
if base.localAvatar.laffMeter:
base.localAvatar.laffMeter.stop()
self.gui.show()
self.showMinigamesAndBonuses()
return
def exitInputChoice(self):
NametagGlobals.setOnscreenChatForced(0)
if self.timer != None:
self.timer.destroy()
self.timer = None
self.timerStartTime = None
self.gui.hide()
return
def enterWaitServerChoices(self):
self.notify.debug('enterWaitServerChoices')
self.waitingChoicesLabel.show()
self.gui.hide()
def exitWaitServerChoices(self):
self.waitingChoicesLabel.hide()
def enterDisplayVotes(self, votes, directions, directionToGo, directionReason):
if self.UseTrolleyResultsPanel:
self.moveCameraToTrolley()
self.hideMinigamesAndBonuses()
else:
self.moveCameraToTop()
self.resultVotes = votes
self.resultDirections = directions
self.directionToGo = directionToGo
self.directionReason = directionReason
self.resultsStr = ''
directionTotals = [0] * TravelGameGlobals.MaxDirections
for index in range(len(votes)):
if index < len(self.avNames):
avId = self.avIdList[index]
dir = directions[index]
numVotes = votes[index]
directionTotals[dir] += numVotes
curStr = TTLocalizer.TravelGameOneToonVote % {'name': self.avNames[index],
'numVotes': numVotes,
'dir': TTLocalizer.TravelGameDirections[dir]}
if not (numVotes == 0 and avId in self.disconnectedAvIds):
self.resultsStr += curStr
directionStr = TTLocalizer.TravelGameTotals
for index in range(len(directionTotals)):
directionStr += ' ' + TTLocalizer.TravelGameDirections[index] + ':'
directionStr += str(directionTotals[index])
directionStr += '\n'
self.resultsStr += directionStr
reasonStr = ''
if directionReason == TravelGameGlobals.ReasonVote:
if directionToGo == 0:
losingDirection = 1
else:
losingDirection = 0
diffVotes = directionTotals[directionToGo] - directionTotals[losingDirection]
reasonStr = ''
if diffVotes > 1:
reasonStr = TTLocalizer.TravelGameReasonVotesPlural % {'dir': TTLocalizer.TravelGameDirections[directionToGo],
'numVotes': diffVotes}
else:
reasonStr = TTLocalizer.TravelGameReasonVotesSingular % {'dir': TTLocalizer.TravelGameDirections[directionToGo],
'numVotes': diffVotes}
elif directionReason == TravelGameGlobals.ReasonRandom:
reasonStr = TTLocalizer.TravelGameReasonRandom % {'dir': TTLocalizer.TravelGameDirections[directionToGo],
'numVotes': directionTotals[directionToGo]}
elif directionReason == TravelGameGlobals.ReasonPlaceDecider:
reasonStr = TTLocalizer.TravelGameReasonPlace % {'name': 'TODO NAME',
'dir': TTLocalizer.TravelGameDirections[directionToGo]}
self.resultsStr += reasonStr
self.dialog = TTDialog.TTDialog(text=self.resultsStr, command=self.__cleanupDialog, style=TTDialog.NoButtons, pos=(0, 0, 1))
self.dialog.hide()
if self.UseTrolleyResultsPanel:
self.votesPanel = VoteResultsTrolleyPanel.VoteResultsTrolleyPanel(len(self.avIdList), self.avIdList, votes, directions, self.avNames, self.disconnectedAvIds, directionToGo, directionReason, directionTotals)
else:
self.votesPanel = VoteResultsPanel.VoteResultsPanel(len(self.avIdList), self.avIdList, votes, directions, self.avNames, self.disconnectedAvIds, directionToGo, directionReason, directionTotals)
self.votesPanel.startMovie()
numPlayers = len(self.avIdList)
if TravelGameGlobals.SpoofFour:
numPlayers = 4
delay = TravelGameGlobals.DisplayVotesTimePerPlayer * (numPlayers + 1)
taskMgr.doMethodLater(delay, self.displayVotesTimeoutTask, self.taskName('displayVotes-timeout'))
curSwitch = TravelGameGlobals.BoardLayouts[self.boardIndex][self.currentSwitch]
self.destSwitch = curSwitch['links'][directionToGo]
self.updateCurrentVotes()
def exitDisplayVotes(self):
taskMgr.remove(self.taskName('displayVotes-timeout'))
self.__cleanupDialog(0)
if not self.UseTrolleyResultsPanel:
self.showMinigamesAndBonuses()
self.votesPanel.destroy()
def enterMoveTrolley(self):
self.notify.debug('enterMoveTrolley')
camera.wrtReparentTo(render)
keyAngle = round(self.TrolleyMoveDuration) * 360
dist = Vec3(self.trainSwitches[self.destSwitch].getPos() - self.trainSwitches[self.currentSwitch].getPos()).length()
wheelAngle = dist / (2.0 * math.pi * 0.95) * 360
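# Clarifying note: keyAngle winds the trolley's keys one full turn per
# (rounded) second of travel, while wheelAngle rolls the wheels one full
# turn per 2*pi*0.95 units of distance, so the lerp below keeps both in
# step with the trolley's ground speed.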
trolleyAnimateInterval = LerpFunctionInterval(self.animateTrolley, duration=self.TrolleyMoveDuration, blendType='easeInOut', extraArgs=[keyAngle, wheelAngle], name='TrolleyAnimate')
moveTrolley = Sequence()
moveTrolley.append(Func(self.resetAnimation))
newPos = self.trainSwitches[self.destSwitch].getPos()
linkKey = (self.currentSwitch, self.destSwitch)
origHeading = self.trainTracks[linkKey].getH()
heading = origHeading + 90
firstTurn = Parallel()
firstTurn.append(LerpHprInterval(self.trolleyCar, 1, Vec3(heading, 0, 0)))
firstTurn.append(LerpHprInterval(self.trainSwitches[self.currentSwitch], 1, Vec3(origHeading, 0, 0)))
firstTurn.append(LerpHprInterval(self.trainSwitches[self.destSwitch], 1, Vec3(origHeading, 0, 0)))
moveTrolley.append(firstTurn)
moveTrolley.append(Parallel(LerpPosInterval(self.trolleyCar, self.TrolleyMoveDuration, newPos, blendType='easeInOut'), trolleyAnimateInterval))
secondTurn = Parallel()
secondTurn.append(LerpHprInterval(self.trolleyCar, 1, Vec3(90, 0, 0)))
secondTurn.append(LerpHprInterval(self.trainSwitches[self.currentSwitch], 1, Vec3(0, 0, 0)))
secondTurn.append(LerpHprInterval(self.trainSwitches[self.destSwitch], 1, Vec3(0, 0, 0)))
moveTrolley.append(secondTurn)
soundTrack = Sequence()
trolleyExitBellInterval = Parallel(SoundInterval(self.trolleyBellSfx, duration=1), SoundInterval(self.turntableRotateSfx, duration=1, volume=0.5))
trolleyExitAwayInterval = SoundInterval(self.trolleyAwaySfx, duration=3)
soundTrack.append(trolleyExitBellInterval)
soundTrack.append(trolleyExitAwayInterval)
soundTrack.append(trolleyExitBellInterval)
self.moveTrolleyIval = Parallel(moveTrolley, soundTrack)
duration = self.moveTrolleyIval.getDuration()
def focusOnTrolley(t, self = self):
pos = self.trolleyCar.getPos()
pos.setZ(pos.getZ() + 7.5)
camera.lookAt(pos)
self.lastFocusHpr = camera.getHpr()
setRightHprTime = 0
if self.FlyCameraUp:
setRightHprTime = 1.0
camIval1 = Parallel()
camIval1.append(LerpFunc(focusOnTrolley, duration - setRightHprTime, name='focusOnTrolley'))
finalPos = Vec3(self.cameraTopView[0], self.cameraTopView[1], self.cameraTopView[2])
finalHpr = Vec3(self.cameraTopView[3], self.cameraTopView[4], self.cameraTopView[5])
if self.FlyCameraUp:
if self.FocusOnTrolleyWhileMovingUp:
camIval1.append(LerpPosInterval(camera, duration - setRightHprTime, finalPos, name='cameraMove'))
camIval2 = Sequence(LerpHprInterval(camera, setRightHprTime, finalHpr, name='cameraHpr'))
else:
camIval2 = Sequence(LerpPosHprInterval(camera, setRightHprTime, finalPos, finalHpr, blendType='easeIn', name='cameraHpr'))
camIval = Sequence(camIval1, camIval2)
else:
camIval = Sequence(camIval1)
if self.UseTrolleyResultsPanel:
self.moveTrolleyIval.append(camIval)
temp = self.moveTrolleyIval
self.moveTrolleyIval = Sequence(temp)
if self.isLeaf(self.destSwitch):
self.moveTrolleyIval.append(Func(self.gameFSM.request, 'winMovie'))
else:
self.moveTrolleyIval.append(Func(self.gameFSM.request, 'inputChoice'))
self.moveTrolleyIval.start()
def exitMoveTrolley(self):
self.notify.debug('exitMoveTrolley')
self.currentSwitch = self.destSwitch
self.moveTrolleyIval.finish()
self.moveCameraToTop()
self.showMinigamesAndBonuses()
def enterWinMovie(self):
resultStr = TTLocalizer.TravelGamePlaying % {'game': self.idToNames[self.switchToMinigameDict[self.currentSwitch]]}
numToons = 0
for avId in self.avIdList:
if avId not in self.disconnectedAvIds:
numToons += 1
if numToons <= 1:
resultStr = TTLocalizer.TravelGameGoingBackToShop
reachedGoalStr = None
localAvatarWon = False
localAvatarLost = False
noWinner = True
for avId in self.avIdBonuses.keys():
name = ''
avatar = self.getAvatar(avId)
if avatar:
name = avatar.getName()
if self.avIdBonuses[avId][0] == self.currentSwitch:
noWinner = False
reachedGoalStr = TTLocalizer.TravelGameGotBonus % {'name': name,
'numBeans': self.avIdBonuses[avId][1]}
if avId == base.localAvatar.doId:
if not TravelGameGlobals.ReverseWin:
self.wonGameSfx.play()
bonusLabel = self.switchToBonusLabelDict[self.currentSwitch]
self.flashWinningBeansTrack = Sequence(LerpColorScaleInterval(bonusLabel, 0.75, Vec4(0.5, 1, 0.5, 1)), LerpColorScaleInterval(bonusLabel, 0.75, Vec4(1, 1, 1, 1)))
self.flashWinningBeansTrack.loop()
else:
self.lostGameSfx.play()
elif not TravelGameGlobals.ReverseWin:
self.lostGameSfx.play()
else:
self.wonGameSfx.play()
if noWinner:
self.noWinnerSfx.play()
resultStr += '\n\n'
resultStr += TTLocalizer.TravelGameNoOneGotBonus
if reachedGoalStr:
resultStr += '\n\n'
resultStr += reachedGoalStr
self.winDialog = TTDialog.TTDialog(text=resultStr, command=self.__cleanupWinDialog, style=TTDialog.NoButtons)
info = TravelGameGlobals.BoardLayouts[self.boardIndex][self.currentSwitch]
leafX, leafY, leafZ = info['pos']
endX = leafX + TravelGameGlobals.xInc
heading = 90
moveTrolley = Sequence()
moveTrolley.append(LerpHprInterval(self.trolleyCar, 1, Vec3(heading, 0, 0)))
moveTrolley.append(LerpPosInterval(self.trolleyCar, 3, Vec3(endX + 20, leafY, 0)))
soundTrack = Sequence()
trolleyExitBellInterval = SoundInterval(self.trolleyBellSfx, duration=1)
trolleyExitAwayInterval = SoundInterval(self.trolleyAwaySfx, duration=3)
soundTrack.append(trolleyExitBellInterval)
soundTrack.append(trolleyExitAwayInterval)
soundTrack.append(trolleyExitBellInterval)
self.moveTrolleyIval = Parallel(moveTrolley, soundTrack)
self.moveTrolleyIval.start()
delay = 8
taskMgr.doMethodLater(delay, self.gameOverCallback, self.taskName('playMovie'))
return
def exitWinMovie(self):
taskMgr.remove(self.taskName('playMovie'))
self.moveTrolleyIval.finish()
def enterCleanup(self):
self.notify.debug('enterCleanup')
def exitCleanup(self):
pass
def setStartingVotes(self, startingVotesArray):
if not len(startingVotesArray) == len(self.avIdList):
self.notify.error('length does not match, startingVotes=%s, avIdList=%s' % (startingVotesArray, self.avIdList))
return
for index in range(len(self.avIdList)):
avId = self.avIdList[index]
self.startingVotes[avId] = startingVotesArray[index]
if not self.currentVotes.has_key(avId):
self.currentVotes[avId] = startingVotesArray[index]
self.notify.debug('starting votes = %s' % self.startingVotes)
def startTimer(self):
now = globalClock.getFrameTime()
elapsed = now - self.timerStartTime
self.timer.setPos(1.16, 0, -0.83)
self.timer.setTime(TravelGameGlobals.InputTimeout)
self.timer.countdown(TravelGameGlobals.InputTimeout - elapsed, self.handleChoiceTimeout)
self.timer.show()
def setTimerStartTime(self, timestamp):
if not self.hasLocalToon:
return
self.timerStartTime = globalClockDelta.networkToLocalTime(timestamp)
if self.timer != None:
self.startTimer()
return
def handleChoiceTimeout(self):
self.sendUpdate('setAvatarChoice', [0, 0])
self.gameFSM.request('waitServerChoices')
def putChoicesInScrollList(self):
available = self.currentVotes[self.localAvId]
if len(self.scrollList['items']) > 0:
self.scrollList.removeAllItems()
self.indexToVotes = {}
index = 0
for vote in range(available)[::-1]:
self.scrollList.addItem(str(-(vote + 1)))
self.indexToVotes[index] = vote + 1
index += 1
self.scrollList.addItem(str(0))
self.indexToVotes[index] = 0
self.zeroVoteIndex = index
index += 1
for vote in range(available):
self.scrollList.addItem(str(vote + 1))
self.indexToVotes[index] = vote + 1
index += 1
self.scrollList.scrollTo(self.zeroVoteIndex)
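# Illustrative sketch of the list this builds, assuming 2 available votes:
#   item:          "-2"  "-1"  "0"  "1"  "2"
#   index:           0     1    2    3    4    (zeroVoteIndex == 2)
#   indexToVotes:    2     1    0    1    2
# Indices left of zeroVoteIndex pair with the 'up' label, indices right of
# it with 'down'; getAbsVoteChoice/getAbsDirectionChoice below decode this.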
def getAbsVoteChoice(self):
available = self.currentVotes[self.localAvId]
retval = 0
if hasattr(self, 'scrollList'):
selectedIndex = self.scrollList.getSelectedIndex()
if self.indexToVotes.has_key(selectedIndex):
retval = self.indexToVotes[selectedIndex]
return retval
def getAbsDirectionChoice(self):
selectedIndex = self.scrollList.getSelectedIndex()
if selectedIndex < self.zeroVoteIndex:
retval = 0
elif selectedIndex == self.zeroVoteIndex:
retval = 0
else:
retval = 1
return retval
def makeTextMatchChoice(self):
self.votesPeriodLabel.hide()
self.votesToGoLabel.hide()
self.upLabel.hide()
self.downLabel.hide()
if not hasattr(self, 'scrollList') or not hasattr(self, 'zeroVoteIndex'):
return
selectedIndex = self.scrollList.getSelectedIndex()
if selectedIndex < self.zeroVoteIndex:
self.votesToGoLabel.show()
self.upLabel.show()
elif selectedIndex == self.zeroVoteIndex:
self.votesPeriodLabel.show()
else:
self.votesToGoLabel.show()
self.downLabel.show()
def scrollChoiceChanged(self):
choiceVotes = self.getAbsVoteChoice()
if choiceVotes == 1:
self.votesToGoLabel['text'] = TTLocalizer.TravelGameVoteToGo
else:
self.votesToGoLabel['text'] = TTLocalizer.TravelGameVotesToGo
available = self.currentVotes[self.localAvId]
self.localVotesRemaining['text'] = str(available - choiceVotes)
self.makeTextMatchChoice()
def setAvatarChose(self, avId):
if not self.hasLocalToon:
return
self.notify.debug('setAvatarChose: avatar: ' + str(avId) + ' chose a number')
def handleInputChoice(self):
numVotes = self.getAbsVoteChoice()
direction = self.getAbsDirectionChoice()
self.sendUpdate('setAvatarChoice', [numVotes, direction])
self.gameFSM.request('waitServerChoices')
def setServerChoices(self, votes, directions, directionToGo, directionReason):
if not self.hasLocalToon:
return
self.notify.debug('requesting displayVotes, curState=%s' % self.gameFSM.getCurrentState().getName())
self.gameFSM.request('displayVotes', [votes,
directions,
directionToGo,
directionReason])
def __cleanupDialog(self, value):
if self.dialog:
self.dialog.cleanup()
self.dialog = None
return
def displayVotesTimeoutTask(self, task):
self.notify.debug('Done waiting for display votes')
self.gameFSM.request('moveTrolley')
return Task.done
def updateCurrentVotes(self):
for index in range(len(self.resultVotes)):
avId = self.avIdList[index]
oldCurrentVotes = self.currentVotes[avId]
self.currentVotes[avId] -= self.resultVotes[index]
self.putChoicesInScrollList()
self.makeTextMatchChoice()
def isLeaf(self, switchIndex):
retval = False
links = TravelGameGlobals.BoardLayouts[self.boardIndex][switchIndex]['links']
if len(links) == 0:
retval = True
return retval
def __cleanupWinDialog(self, value):
if hasattr(self, 'winDialog') and self.winDialog:
self.winDialog.cleanup()
self.winDialog = None
return
def gameOverCallback(self, task):
self.__cleanupWinDialog(0)
self.gameOver()
return Task.done
def setMinigames(self, switches, minigames):
if not self.hasLocalToon:
return
self.switchToMinigameDict = {}
for index in range(len(switches)):
switch = switches[index]
minigame = minigames[index]
self.switchToMinigameDict[switch] = minigame
self.notify.debug('minigameDict = %s' % self.switchToMinigameDict)
self.loadMinigameIcons()
def loadMinigameIcons(self):
self.mg_icons = loader.loadModel('phase_4/models/minigames/mg_icons')
for switch in self.switchToMinigameDict.keys():
minigame = self.switchToMinigameDict[switch]
switchPos = self.trainSwitches[switch].getPos()
labelPos = map3dToAspect2d(render, switchPos)
useText = True
iconName = None
if minigame in IconDict.keys():
iconName = IconDict[minigame]
icon = None
if self.mg_icons:
icon = self.mg_icons.find('**/%s' % iconName)
if not icon.isEmpty():
useText = False
if labelPos:
if useText:
labelPos.setZ(labelPos.getZ() - 0.1)
label = DirectLabel(text=self.idToNames[minigame], relief=None, scale=0.1, pos=labelPos, text_fg=(1.0, 1.0, 1.0, 1.0))
label.hide()
self.minigameLabels.append(label)
else:
placeHolder = DirectButton(image=icon, relief=None, text=('',
'',
self.idToNames[minigame],
''), text_scale=0.3, text_pos=(0, -0.7, 0), text_fg=(1, 1, 1, 1), clickSound=None, pressEffect=0)
placeHolder.setPos(labelPos)
placeHolder.setScale(0.2)
placeHolder.hide()
self.minigameIcons.append(placeHolder)
tunnel = self.tunnels[switch]
sign = tunnel.attachNewNode('sign')
icon.copyTo(sign)
sign.setH(-90)
sign.setZ(26)
sign.setScale(10)
return
def showMinigamesAndBonuses(self):
for label in self.minigameLabels:
label.show()
for label in self.bonusLabels:
label.show()
for icon in self.minigameIcons:
icon.show()
def hideMinigamesAndBonuses(self):
for label in self.minigameLabels:
label.hide()
for label in self.bonusLabels:
label.hide()
for icon in self.minigameIcons:
icon.hide()
def loadBonuses(self):
self.switchToBonusLabelDict = {}
for avId in self.avIdBonuses.keys():
if avId == self.localAvId:
switch = self.avIdBonuses[avId][0]
beans = self.avIdBonuses[avId][1]
switchPos = self.trainSwitches[switch].getPos()
labelPos = map3dToAspect2d(render, switchPos)
if labelPos:
labelPos.setX(labelPos.getX() + 0.1)
labelPos.setZ(labelPos.getZ() - 0.02)
bonusStr = TTLocalizer.TravelGameBonusBeans % {'numBeans': beans}
label = DirectLabel(text=bonusStr, relief=None, scale=0.1, pos=labelPos, text_fg=(1.0, 1.0, 1.0, 1.0), text_align=TextNode.ALeft)
label.hide()
self.bonusLabels.append(label)
self.switchToBonusLabelDict[switch] = label
break
return
def setBonuses(self, switches, beans):
if not self.hasLocalToon:
return
self.avIdBonuses = {}
for index in range(len(self.avIdList)):
avId = self.avIdList[index]
switch = switches[index]
bean = beans[index]
self.avIdBonuses[avId] = (switch, bean)
self.notify.debug('self.avIdBonuses = %s' % self.avIdBonuses)
self.loadBonuses()
def handleDisabledAvatar(self, avId):
self.notify.warning('DistributedTravelGame: handleDisabledAvatar: disabled avId: ' + str(avId))
self.disconnectedAvIds.append(avId)
def setBoardIndex(self, boardIndex):
self.boardIndex = boardIndex
def getIntroMovie(self):
rootInfo = TravelGameGlobals.BoardLayouts[self.boardIndex][0]
rootX, rootY, rootZ = rootInfo['pos']
startX = rootX - TravelGameGlobals.xInc
heading = 90
moveTrolley = Sequence()
moveTrolley.append(Func(self.trolleyCar.setH, 90))
moveTrolley.append(LerpPosInterval(self.trolleyCar, 3, Vec3(rootX, rootY, 0), startPos=Vec3(startX, rootY, 0)))
moveTrolley.append(LerpHprInterval(self.trolleyCar, 1, Vec3(heading, 0, 0)))
soundTrack = Sequence()
trolleyExitAwayInterval = SoundInterval(self.trolleyAwaySfx, duration=3)
trolleyExitBellInterval = SoundInterval(self.trolleyBellSfx, duration=1)
soundTrack.append(trolleyExitAwayInterval)
soundTrack.append(trolleyExitBellInterval)
retval = Parallel(moveTrolley, soundTrack)
return retval
def animateTrolley(self, t, keyAngle, wheelAngle):
for i in range(self.numKeys):
key = self.keys[i]
ref = self.keyRef[i]
key.setH(ref, t * keyAngle)
for i in range(self.numFrontWheels):
frontWheel = self.frontWheels[i]
ref = self.frontWheelRef[i]
frontWheel.setH(ref, t * wheelAngle)
for i in range(self.numBackWheels):
backWheel = self.backWheels[i]
ref = self.backWheelRef[i]
backWheel.setH(ref, t * wheelAngle)
def resetAnimation(self):
for i in range(self.numKeys):
self.keys[i].setTransform(self.keyInit[i])
for i in range(self.numFrontWheels):
self.frontWheels[i].setTransform(self.frontWheelInit[i])
for i in range(self.numBackWheels):
self.backWheels[i].setTransform(self.backWheelInit[i])
|
{
"content_hash": "5e41f9759a657ee000ec82a2c797c325",
"timestamp": "",
"source": "github",
"line_count": 1011,
"max_line_length": 341,
"avg_line_length": 43.98714144411474,
"alnum_prop": 0.6387758314407141,
"repo_name": "ksmit799/Toontown-Source",
"id": "4984ccda8fe4203446cbe1955b45fb57367feeec",
"size": "44471",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "toontown/minigame/DistributedTravelGame.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1441"
},
{
"name": "PLSQL",
"bytes": "901"
},
{
"name": "Python",
"bytes": "15617225"
},
{
"name": "Shell",
"bytes": "182"
}
],
"symlink_target": ""
}
|
from dbserver.server import TSDB_Server
from timeseries.util import *
import os.path, os
DIR_TS_DATA = '/var/dbserver/tsdata'
DIR_TS_DB = '/var/dbserver/tsdb'
def main():
db = TSDB_Server()
db.run()
if __name__ == '__main__':
# If the random time series haven't yet been generated, generate them
if not os.path.exists(DIR_TS_DATA):
os.makedirs(DIR_TS_DATA)
generate_timeseries(1000, DIR_TS_DATA)
if not os.path.exists(DIR_TS_DB):
os.makedirs(DIR_TS_DB)
generate_vantage_points(20, DIR_TS_DATA, DIR_TS_DB)
main()
|
{
"content_hash": "e37b176f3a63282f83983d636de2136c",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 79,
"avg_line_length": 25.28,
"alnum_prop": 0.5838607594936709,
"repo_name": "ashilgard/cs207project",
"id": "f000d78680e694738834ffc4969b9febdd5230c3",
"size": "656",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "src/dbserver/start_dbserver.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "296"
},
{
"name": "HTML",
"bytes": "1704"
},
{
"name": "JavaScript",
"bytes": "6411"
},
{
"name": "Makefile",
"bytes": "552"
},
{
"name": "Python",
"bytes": "162587"
},
{
"name": "Shell",
"bytes": "1766"
}
],
"symlink_target": ""
}
|
"""Unit tests for Credential Wallet class
NERC Data Grid Project
"""
__author__ = "P J Kershaw"
__date__ = "03/10/08"
__copyright__ = "(C) 2009 Science and Technology Facilities Council"
__license__ = "BSD - see LICENSE file in top-level directory"
__contact__ = "Philip.Kershaw@stfc.ac.uk"
__revision__ = '$Id$'
import unittest
import os
from ndg.security.common.utils.configfileparsers import \
CaseSensitiveConfigParser, INIPropertyFile, readAndValidateProperties
from ConfigParser import SafeConfigParser
from os.path import join as jnPath
mkPath=lambda file_: jnPath(os.environ['NDGSEC_CONFIGFILEPARSERS_UNITTEST_DIR'],
file_)
import logging
logging.basicConfig(level=logging.DEBUG)
class ConfigFileParsersTestCase(unittest.TestCase):
"""Unit test case for ndg.security.common.utils.configfileparsers
module.
"""
def setUp(self):
if 'NDGSEC_INT_DEBUG' in os.environ:
import pdb
pdb.set_trace()
if 'NDGSEC_CONFIGFILEPARSERS_UNITTEST_DIR' not in os.environ:
os.environ['NDGSEC_CONFIGFILEPARSERS_UNITTEST_DIR'] = \
os.path.abspath(os.path.dirname(__file__))
self.cfg = CaseSensitiveConfigParser()
self.configFilePath = mkPath("test.cfg")
def test1CaseSensitiveConfigParser(self):
caseSensitiveCfg = CaseSensitiveConfigParser()
caseSensitiveCfg.read(self.configFilePath)
cfg = SafeConfigParser()
cfg.read(self.configFilePath)
cfgVal = cfg.getboolean('test1CaseSensitiveConfigParser',
'CaseSensitiveOption')
caseSensitiveVal=caseSensitiveCfg.getboolean(
'test1CaseSensitiveConfigParser',
'CaseSensitiveOption')
assert(caseSensitiveVal != cfgVal)
def test2INIPropertyFile(self):
cfgFile = INIPropertyFile()
validKeys = {'name': NotImplemented, 'useSSL': NotImplemented,
'attCertLifetime': 2000}
prop = cfgFile(self.configFilePath, validKeys,
sections=('test2INIPropertyFile',),
prefix='attributeAuthority')
print "properties ..."
print prop
print("prop['test2INIPropertyFile']['name']=%s"%
prop['test2INIPropertyFile']['name'])
print("prop['test2INIPropertyFile']['useSSL']"
"=%s" % prop['test2INIPropertyFile']['useSSL'])
print("prop['test2INIPropertyFile']['attCertLifetime']=%s" %
prop['test2INIPropertyFile']['attCertLifetime'])
assert(isinstance(prop['test2INIPropertyFile']['attCertLifetime'],
float))
assert(isinstance(prop['test2INIPropertyFile']['useSSL'], bool))
def test3ReadAndValidateProperties(self):
# keys set to NotImplemented must be present in the config, others
# accept defaults as given. A key set to a populated dict denotes
# a subcomponent.
validKeys = {
'sslCertFile': NotImplemented,
'sslKeyFile': NotImplemented,
'sslCACertFilePathList': [],
'credentialWallet': {
'attributeAuthorityURI': 'A DEFAULT VALUE',
'caCertFilePathList': [],
'mapFromTrustedHosts': False,
'attCertRefreshElapse': -1
}
}
prop = readAndValidateProperties(self.configFilePath, validKeys,
sections=('test3ReadAndValidateProperties',),
prefix='sessionManager')
print "properties ..."
print prop
assert(prop.keys()==['test3ReadAndValidateProperties'])
assert(prop['test3ReadAndValidateProperties']['sslCertFile'])
assert('credentialWallet' in prop['test3ReadAndValidateProperties'])
# attributeAuthorityURI is not present in the config so it should be
# set to its default value
assert(prop['test3ReadAndValidateProperties']
['credentialWallet']['attributeAuthorityURI']=='A DEFAULT VALUE')
if __name__ == "__main__":
unittest.main()
|
{
"content_hash": "637e72062dce3c584c8970bf706229c0",
"timestamp": "",
"source": "github",
"line_count": 114,
"max_line_length": 80,
"avg_line_length": 38.280701754385966,
"alnum_prop": 0.5962419798350137,
"repo_name": "cedadev/ndg_security_common",
"id": "e4472674720492bfe6c2bcff1ff37a3f464e3e64",
"size": "4386",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ndg/security/common/test/unit/configfileparsers/test_configfileparsers.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "34"
},
{
"name": "Python",
"bytes": "156596"
}
],
"symlink_target": ""
}
|
import unittest
from rmgpy.rmg.main import RMG
from rmgpy.rmg import input as inp
###################################################
def setUpModule():
"""
A function that is run once before the unit tests in this module.
"""
# set-up RMG object and get global rmg object in input.py file
# so methods can be tested
global rmg
rmg = RMG()
inp.setGlobalRMG(rmg)
def tearDownModule():
# remove RMG object
global rmg
rmg = None
class TestInputDatabase(unittest.TestCase):
"""
Contains unit tests for rmgpy.rmg.input.database
"""
def tearDown(self):
# remove the reactionLibraries value
global rmg
rmg.reactionLibraries = None
def testImportingDatabaseReactionLibrariesFromString(self):
"""
Test that we can import Reaction Libraries using the non-tuple form.
"""
global rmg
# add database properties to RMG
inp.database(reactionLibraries=['test'])
self.assertIsInstance(rmg.reactionLibraries[0], tuple)
self.assertFalse(rmg.reactionLibraries[0][1])
def testImportingDatabaseReactionLibrariesFromFalseTuple(self):
"""
Test that we can import Reaction Libraries using the Tuple False form.
"""
global rmg
# add database properties to RMG
inp.database(reactionLibraries=[('test',False)])
self.assertIsInstance(rmg.reactionLibraries[0], tuple)
self.assertFalse(rmg.reactionLibraries[0][1])
def testImportingDatabaseReactionLibrariesFromTrueTuple(self):
"""
Test that we can import Reaction Libraries using the Tuple True form.
"""
global rmg
# add database properties to RMG
inp.database(reactionLibraries=[('test',True)])
self.assertIsInstance(rmg.reactionLibraries[0], tuple)
self.assertTrue(rmg.reactionLibraries[0][1])
class TestInputThermoCentralDatabase(unittest.TestCase):
"""
Contains unit tests for rmgpy.rmg.input.thermoCentralDatabase
"""
def tearDown(self):
# remove the thermoCentralDatabase value
global rmg
rmg.thermoCentralDatabase = None
def testThemoCentralDatabase(self):
"""
Test that we can set thermoCentralDatabase properties from the input module.
"""
global rmg
# add database properties to RMG
inp.thermoCentralDatabase(
host='some_host',
port=0,
username='some_usr',
password='some_pw',
application='some_app'
)
self.assertEqual(rmg.thermoCentralDatabase.host, 'some_host')
self.assertEqual(rmg.thermoCentralDatabase.port, 0)
self.assertEqual(rmg.thermoCentralDatabase.username, 'some_usr')
self.assertEqual(rmg.thermoCentralDatabase.password, 'some_pw')
self.assertEqual(rmg.thermoCentralDatabase.application, 'some_app')
self.assertEqual(rmg.thermoCentralDatabase.client, None)
if __name__ == '__main__':
unittest.main()
|
{
"content_hash": "7080500e5e564508ebfb37b23fea2b3a",
"timestamp": "",
"source": "github",
"line_count": 95,
"max_line_length": 78,
"avg_line_length": 31.768421052631577,
"alnum_prop": 0.6328694499668654,
"repo_name": "Molecular-Image-Recognition/Molecular-Image-Recognition",
"id": "061d0fe039e14339332829bbd024a2704c33d3bf",
"size": "4505",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "code/rmgpy/rmg/inputTest.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "4715"
},
{
"name": "Python",
"bytes": "5599677"
}
],
"symlink_target": ""
}
|
from bs4 import BeautifulSoup as Soup
import urls
import re
import proxy
from datetime import *
import time
from time import mktime
import functions
from pytz import timezone
import authenticate
#s2module-bg s2time-off
def sameDay ( date, dayOfWeek, week, year ):
theDay = datetime.fromtimestamp(mktime(time.strptime("%s %s %s %s %s" % ("12", "00", dayOfWeek , week, year),"%H %M %w %W %Y")))
return theDay.date() == date.date()
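# A minimal usage sketch, assuming the time module's numbering (%W weeks,
# %w weekdays with Sunday == 0); the concrete date is illustrative only.
def _demo_sameDay():
    # Weekday 3 (Wednesday) of %W-week 20 in 2013 falls on 2013-05-22, so
    # any datetime on that calendar day should match.
    return sameDay(datetime(2013, 5, 22, 15, 30), 3, 20, 2013)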
def timetable( config, url, week, year, session = False ):
if session == False:
cookies = {}
else:
if session == True:
session = authenticate.authenticate(config)
# Insert the session information from the auth function
cookies = {
"lecmobile" : "0",
"ASP.NET_SessionId" : session["ASP.NET_SessionId"],
"LastLoginUserName" : session["LastLoginUserName"],
"lectiogsc" : session["lectiogsc"],
"LectioTicket" : session["LectioTicket"]
}
# Sorting settings
settings = {
}
# Insert User-agent headers and the cookie information
headers = {
"User-Agent" : "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/32.0.1665.2 Safari/537.36",
"Content-Type" : "application/x-www-form-urlencoded",
"Host" : "www.lectio.dk",
"Origin" : "https://www.lectio.dk",
"Cookie" : functions.implode(cookies, "{{index}}={{value}}", "; ")
}
response = proxy.session.get(url, headers=headers)
html = response.text
soup = Soup(html)
if soup.find("table", attrs={"id" : "s_m_Content_Content_SkemaNyMedNavigation_skema_skematabel"}) is None:
return {
"status" : False,
"error" : "Data not found"
}
# Fetch all rows in the table
rows = soup.find("table", attrs={"id" : "s_m_Content_Content_SkemaNyMedNavigation_skema_skematabel"}).findAll("tr")
# Fetch module info, to make it possible to draw a complete timetable
moduleInfo = []
moduleInfoProg = re.compile(r"(?P<module_number>.*)\. (?P<start_time>.*) - (?P<end_time>.*)")
for row in soup.findAll("div", attrs={"class" : "s2module-info"}):
moduleInfoGroups = moduleInfoProg.match(row.text.strip().replace("modul", ""))
if not moduleInfoGroups is None:
start = moduleInfoGroups.group("start_time")
if len(start) < 5:
start = "0" + start
end = moduleInfoGroups.group("end_time")
if len(end) < 5:
end = "0" + end
moduleInfo.append({
"module" : moduleInfoGroups.group("module_number"),
"start" : start,
"end" : end
})
# Fetch the general information cells
generalInformationDays = rows[2].findAll("td")
generalInformation = []
holidayElements = []
# Loop through all the cells, and look for information
index = 0
for tdRow in generalInformationDays:
index = index+1
dayOfWeek = index-1
if dayOfWeek == 7:
dayOfWeek = 0
if index > 1:
row = tdRow.findAll("a")
# Loop over the link elements, in the cell
if not row == None and len(row) > 0:
for element in row:
# The time module uses "0" as the first week of the year
if int(week) == 1:
timeWeek = 0
else:
# Subtract one, because 0 is the first week
timeWeek = int(week)-1
date = time.strptime("%s %s %s" % (str(dayOfWeek),str(timeWeek), str(year)),"%w %W %Y")
content = element.find("div", attrs={"class" : "s2skemabrikcontent"}).findAll("span")[1]
div = element.find("div", attrs={"class" : "s2skemabrikcontent"})
href = None
# If the anchor tag has an href, fetch it
try:
href = element["href"]
except BaseException:
pass
if href == None:
generalInformation.append({
"message" : unicode(content.text),
"date" : datetime.fromtimestamp(mktime(date)),
"school_id" : str(config["school_id"]),
"branch_id" : str(config["branch_id"]),
"term" : soup.find("select", attrs={"id" : "s_m_ChooseTerm_term"}).select('option[selected="selected"]')[0]["value"],
"week" : week,
"year" : year
})
else:
# Compile the regular expression
prog = re.compile(r"\/lectio\/(?P<school_id>[0-9]*)\/aktivitet\/aktivitetinfo.aspx\?id=(?P<activity_id>[0-9]*)&(?P<prev_url>.*)")
activityGroups = prog.match(element["href"])
generalInformation.append({
"message" : unicode(content.text),
"activity_id" : activityGroups.group("activity_id"),
"status" : "changed" if "s2changed" in div["class"] else "cancelled" if "s2cancelled" in div["class"] else "normal",
"date" : datetime.fromtimestamp(mktime(date)),
"school_id" : str(config["school_id"]),
"branch_id" : str(config["branch_id"]),
"term" : soup.find("select", attrs={"id" : "s_m_ChooseTerm_term"}).select('option[selected="selected"]')[0]["value"],
"week" : week,
"year" : year
})
# Find all the day elements
timeElements = []
headers = []
headerRows = rows[1].findAll("td")
headerRows.pop(0)
headerProg = re.compile(ur"(?P<day_name>.*) \((?P<day>.*)\/(?P<month>.*)\)")
for row in headerRows:
headerGroups = headerProg.match(row.text)
headerYear = year
if not headerGroups is None:
if int(week) == 1 and int(headerGroups.group("month")) == 12:
headerYear = str(int(year) - 1)
headers.append({
"day" : headerGroups.group("day_name"),
"date" : datetime.strptime("%s-%s-%s %s" % (functions.zeroPadding(headerGroups.group("day")), functions.zeroPadding(headerGroups.group("month")), headerYear, "12:00"), "%d-%m-%Y %H:%M")
})
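# Illustrative (assumed) input: a header cell such as "Mandag (12/8)" in a
# 2013 schedule yields {"day": "Mandag", "date": <2013-08-12 12:00>}; the
# fixed noon time only serves as a stable marker for the day.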
dayElements = rows[3].findAll("td")
dayElements.pop(0)
# Loop over the days
index = 0
dayOfWeek = 1
for dayElement in dayElements:
# Increment the day
index = index+1
# Map the column index to a weekday number (Sunday wraps to 0)
dayOfWeek = index
if dayOfWeek == 7:
dayOfWeek = 0
# The time module uses "0" as the first week of the year
if int(week) == 1:
timeWeek = 0
else:
# Subtract one, because 0 is the first week
timeWeek = int(week)-1
# Find all the "a" tags, representing timetable elements
timetableElements = dayElement.findAll("a")
moduleIndex = 1
for checkElement in dayElement.findAll(attrs={"class" : "s2module-bg"}):
if "s2time-off" in checkElement["class"]:
# Get time from module info elements
holidayElements.append({
"start" : datetime.strptime("%s-%s-%s %s" % (headers[index-1]["date"].strftime("%d"), headers[index-1]["date"].strftime("%m"), headers[index-1]["date"].strftime("%Y"), moduleInfo[moduleIndex-1]["start"]), "%d-%m-%Y %H:%M"),
"end" : datetime.strptime("%s-%s-%s %s" % (headers[index-1]["date"].strftime("%d"), headers[index-1]["date"].strftime("%m"), headers[index-1]["date"].strftime("%Y"), moduleInfo[moduleIndex-1]["end"]), "%d-%m-%Y %H:%M")
})
moduleIndex = moduleIndex + 1
# Loop over the timetable elements
for timetableElement in timetableElements:
#The type of the event, "private" or "school"
type = None
# Locate the different types of information in the url, and find the different RegEx groups
expressions = [
{"type" : "private", "expression" : r"\/lectio\/(?P<school_id>[0-9]*)\/privat_aftale.aspx\?aftaleid=(?P<activity_id>[0-9]*)"},
{"type" : "school", "expression" : r"\/lectio\/(?P<school_id>[0-9]*)\/aktivitet\/aktivitetinfo.aspx\?id=(?P<activity_id>[0-9]*)&(?P<prev_url>.*)"},
{"type" : "outgoing_censor", "expression" : r"\/lectio\/(?P<school_id>.*)\/proevehold.aspx\?type=udgcensur&outboundCensorID=(?P<outbound_censor_id>.*)&prevurl=(?P<prev_url>.*)"},
{"type" : "exam", "expression" : r"\/lectio\/(?P<school_id>.*)\/proevehold.aspx\?type=proevehold&ProeveholdId=(?P<test_team_id>.*)&prevurl=(?P<prev_url>.*)"}
]
# Loop over the expressions
groups = []
type = "other"
for expressionObject in expressions:
prog = re.compile(expressionObject["expression"])
if prog.match(timetableElement["href"]):
groups = prog.match(timetableElement["href"])
type = expressionObject["type"]
# Locate the status div
div = timetableElement.find("div", attrs={"class" : "s2skemabrikcontent"})
# A list of the teachers
teachers = []
# A list of the assigned teams
teams = []
# Find all the info span elements
infoSpanObjects = timetableElement.findAll("span")
# Loop over the Info spans
for span in infoSpanObjects:
id = None
# Test if property exists
try:
id = span["lectiocontextcard"]
except BaseException:
pass
if not id == None:
# Team
if span["lectiocontextcard"][0] == "H":
# Append the team
teams.append({
"context_card_id" : span["lectiocontextcard"],
"title" : unicode(span.text),
"team_id" : span["lectiocontextcard"].replace("HE", "")
})
# Teacher
elif span["lectiocontextcard"][0] == "T":
teachers.append({
"abbrevation" : unicode(span.text),
"context_card_id" : span["lectiocontextcard"],
"teacher_id" : span["lectiocontextcard"].replace("T", "")
})
# Get the titletext where to extract start and end times from
title = timetableElement["title"]
# Match the title, to extract the start and end time
timeProg = re.compile(r"(?P<start_hour>[0-9]*):(?P<start_minute>[0-9]*) til (?P<end_hour>[0-9]*):(?P<end_minute>[0-9]*)", re.MULTILINE)
timeGroups = timeProg.search(unicode(title).encode("utf8"))
# Get the "main sections" separated by a double return \n\n
mainSections = title.split("\n\n")
# Grab the top section and split it by a single return \n
topSection = mainSections[0].split("\n")
# Initialize variables, assume that nothing is cancelled or changed
isChangedOrCancelled = 0
isCancelled = False
isChanged = False
# If the first item in the top section doesn't contain 'til',
# it must be either cancelled or changed
if not "til" in topSection[0]:
isChangedOrCancelled = 1
# If it says 'Aflyst!'
if "Aflyst!" in topSection[0]:
# It must be cancelled
isCancelled = True
else:
# Otherwise it must be changed
isChanged = True
if not timeGroups is None:
startTime = datetime.fromtimestamp(mktime(time.strptime("%s %s %s %s %s" % (timeGroups.group("start_hour"),timeGroups.group("start_minute"), dayOfWeek , timeWeek, year),"%H %M %w %W %Y")))
endTime = datetime.fromtimestamp(mktime(time.strptime("%s %s %s %s %s" % (timeGroups.group("end_hour"),timeGroups.group("end_minute"), dayOfWeek , timeWeek, year),"%H %M %w %W %Y")))
else:
# Grab the date sections, e.g.: "15/5-2013 15:30 til 17:00"
dateSections = topSection[0+isChangedOrCancelled].split(" ")
# Grab the date, being the first (0) section
if len(dateSections) == 4:
startDateSection = dateSections[0]
endDateSection = dateSections[0]
startTimeSection = dateSections[1]
endTimeSection = dateSections[3]
else:
startDateSection = dateSections[0]
endDateSection = dateSections[3]
startTimeSection = dateSections[1]
endTimeSection = dateSections[4]
currentTimezone = timezone("Europe/Copenhagen")
alternativeDayProg = re.compile(r"(?P<day>[0-9]*)/(?P<month>[0-9]*)-(?P<year>[0-9]*)")
alternativeStartDayGroups = alternativeDayProg.match(startDateSection.strip())
alternativeEndDayGroups = alternativeDayProg.match(endDateSection.strip())
startTime = datetime.strptime("%s/%s-%s %s" % (functions.zeroPadding(alternativeStartDayGroups.group("day")), functions.zeroPadding(alternativeStartDayGroups.group("month")), alternativeStartDayGroups.group("year"), startTimeSection.strip()), "%d/%m-%Y %H:%M")
endTime = datetime.strptime("%s/%s-%s %s" % (functions.zeroPadding(alternativeEndDayGroups.group("day")), functions.zeroPadding(alternativeEndDayGroups.group("month")), alternativeEndDayGroups.group("year"), endTimeSection.strip()), "%d/%m-%Y %H:%M")
roomText = ""
try:
if not "rer:" in topSection[3 + isChangedOrCancelled]:
roomText = topSection[3 + isChangedOrCancelled].strip("Lokale: ").encode('utf-8').replace("r: ","")
except IndexError:
pass
if sameDay(startTime, dayOfWeek, timeWeek, year):
if type == "private":
timeElements.append({
"text" : unicode(timetableElement.text),
"activity_id" : groups.group("activity_id"),
"startTime" : startTime,
"endTime" : endTime,
"type" : type,
"school_id" : groups.group("school_id")
})
elif type == "outgoing_censor":
timeElements.append({
"text" : unicode(timetableElement.text),
"outbound_censor_id" : groups.group("outbound_censor_id"),
"startTime" : startTime,
"endTime" : endTime,
"type" : type,
"school_id" : groups.group("school_id")
})
elif type == "exam":
timeElements.append({
"text" : unicode(timetableElement.text),
"test_team_id" : groups.group("test_team_id"),
"startTime" : startTime,
"endTime" : endTime,
"type" : type,
"school_id" : groups.group("school_id")
})
elif type == "school":
# Add to the list
timeElements.append({
"text" : unicode(timetableElement.text),
"activity_id" : groups.group("activity_id"),
"status" : "changed" if "s2changed" in div["class"] else "cancelled" if "s2cancelled" in div["class"] else "normal",
"teachers" : teachers,
"teams" : teams,
"startTime" : startTime,
"endTime" : endTime,
"type" : type,
"location_text" : unicode(div.text),
"room_text" : unicode(roomText),
"school_id" : groups.group("school_id")
})
return {
"status" : "ok",
"timetable" : timeElements,
"information" : generalInformation,
"module_info" : moduleInfo,
"headers" : headers,
"term" : {
"value" : soup.find("select", attrs={"id" : "s_m_ChooseTerm_term"}).select('option[selected="selected"]')[0]["value"],
"years_string" : soup.find("select", attrs={"id" : "s_m_ChooseTerm_term"}).select('option[selected="selected"]')[0].text
}
}
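# A hedged usage sketch; the URL shape and config keys here are assumptions
# inferred from the fields this function reads, not a documented Lectio API.
def _example_timetable_call(config, week="34", year="2013"):
    url = "https://www.lectio.dk/lectio/%s/SkemaNy.aspx" % config["school_id"]
    return timetable(config, url, week, year, session=True)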
|
{
"content_hash": "9f65ebc151959fc52bce833925471cab",
"timestamp": "",
"source": "github",
"line_count": 395,
"max_line_length": 264,
"avg_line_length": 35.060759493670886,
"alnum_prop": 0.6383132356126796,
"repo_name": "boh1996/LectioAPI",
"id": "b2c70b523220a03296421b4c2798e5dfdd471441",
"size": "13891",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scrapers/timetable.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "396682"
}
],
"symlink_target": ""
}
|
'''
'''
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
Test.Summary = '''
Test tls server certificate verification options
'''
# need Curl
Test.SkipUnless(
Condition.HasProgram("curl", "Curl needs to be installed on the system for this test to work")
)
# Define default ATS
ts = Test.MakeATSProcess("ts", select_ports=False)
server_foo = Test.MakeOriginServer("server_foo", ssl=True, options = {"--key": "{0}/signed-foo.key".format(Test.RunDirectory), "--cert": "{0}/signed-foo.pem".format(Test.RunDirectory)})
server_bar = Test.MakeOriginServer("server_bar", ssl=True, options = {"--key": "{0}/signed-bar.key".format(Test.RunDirectory), "--cert": "{0}/signed-bar.pem".format(Test.RunDirectory)})
server_wild = Test.MakeOriginServer("server_wild", ssl=True, options = {"--key": "{0}/wild.key".format(Test.RunDirectory), "--cert": "{0}/wild-signed.pem".format(Test.RunDirectory)})
server = Test.MakeOriginServer("server", ssl=True)
request_foo_header = {"headers": "GET / HTTP/1.1\r\nHost: foo.com\r\n\r\n", "timestamp": "1469733493.993", "body": ""}
request_bad_foo_header = {"headers": "GET / HTTP/1.1\r\nHost: badfoo.com\r\n\r\n", "timestamp": "1469733493.993", "body": ""}
request_bar_header = {"headers": "GET / HTTP/1.1\r\nHost: bar.com\r\n\r\n", "timestamp": "1469733493.993", "body": ""}
request_bad_bar_header = {"headers": "GET / HTTP/1.1\r\nHost: badbar.com\r\n\r\n", "timestamp": "1469733493.993", "body": ""}
response_header = {"headers": "HTTP/1.1 200 OK\r\nConnection: close\r\n\r\n", "timestamp": "1469733493.993", "body": ""}
server_foo.addResponse("sessionlog.json", request_foo_header, response_header)
server_foo.addResponse("sessionlog.json", request_bad_foo_header, response_header)
server_bar.addResponse("sessionlog.json", request_bar_header, response_header)
server_bar.addResponse("sessionlog.json", request_bad_bar_header, response_header)
server_wild.addResponse("sessionlog.json", request_bar_header, response_header)
# add ssl materials like key, certificates for the server
ts.addSSLfile("ssl/signed-foo.pem")
ts.addSSLfile("ssl/signed-foo.key")
ts.addSSLfile("ssl/signed-bar.pem")
ts.addSSLfile("ssl/signed-bar.key")
ts.addSSLfile("ssl/server.pem")
ts.addSSLfile("ssl/server.key")
ts.addSSLfile("ssl/signer.pem")
ts.addSSLfile("ssl/signer.key")
ts.addSSLfile("ssl/wild.key")
ts.addSSLfile("ssl/wild-signed.pem")
ts.Variables.ssl_port = 4443
ts.Disk.remap_config.AddLine(
'map / https://127.0.0.1:{0}'.format(server.Variables.SSL_Port))
ts.Disk.remap_config.AddLine(
'map https://foo.com/ https://127.0.0.1:{0}'.format(server_foo.Variables.SSL_Port))
ts.Disk.remap_config.AddLine(
'map https://bad_foo.com/ https://127.0.0.1:{0}'.format(server_foo.Variables.SSL_Port))
ts.Disk.remap_config.AddLine(
'map https://bar.com/ https://127.0.0.1:{0}'.format(server_bar.Variables.SSL_Port))
ts.Disk.remap_config.AddLine(
'map https://bad_bar.com/ https://127.0.0.1:{0}'.format(server_bar.Variables.SSL_Port))
ts.Disk.remap_config.AddLine(
'map https://foo.wild.com/ https://127.0.0.1:{0}'.format(server_wild.Variables.SSL_Port))
ts.Disk.remap_config.AddLine(
'map https://foo_bar.wild.com/ https://127.0.0.1:{0}'.format(server_wild.Variables.SSL_Port))
ts.Disk.ssl_multicert_config.AddLine(
'dest_ip=* ssl_cert_name=server.pem ssl_key_name=server.key'
)
# Case 1, global config policy=permissive properties=signature
# override for foo.com policy=enforced properties=all
ts.Disk.records_config.update({
'proxy.config.ssl.server.cert.path': '{0}'.format(ts.Variables.SSLDir),
'proxy.config.ssl.server.private_key.path': '{0}'.format(ts.Variables.SSLDir),
# enable ssl port
'proxy.config.http.server_ports': '{0} {1}:proto=http2;http:ssl'.format(ts.Variables.port, ts.Variables.ssl_port),
'proxy.config.ssl.server.cipher_suite': 'ECDHE-RSA-AES128-GCM-SHA256:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-RSA-AES128-SHA256:ECDHE-RSA-AES256-SHA384:AES128-GCM-SHA256:AES256-GCM-SHA384:ECDHE-RSA-RC4-SHA:ECDHE-RSA-AES128-SHA:ECDHE-RSA-AES256-SHA:RC4-SHA:RC4-MD5:AES128-SHA:AES256-SHA:DES-CBC3-SHA!SRP:!DSS:!PSK:!aNULL:!eNULL:!SSLv2',
# set global policy
'proxy.config.ssl.client.verify.server.policy': 'PERMISSIVE',
'proxy.config.ssl.client.verify.server.properties': 'SIGNATURE',
'proxy.config.ssl.client.CA.cert.path': '{0}'.format(ts.Variables.SSLDir),
'proxy.config.ssl.client.CA.cert.filename': 'signer.pem',
'proxy.config.exec_thread.autoconfig.scale': 1.0,
'proxy.config.url_remap.pristine_host_hdr': 1
})
ts.Disk.ssl_server_name_yaml.AddLines([
'- fqdn: bar.com',
' verify_server_policy: ENFORCED',
' verify_server_properties: ALL',
'- fqdn: "*.wild.com"',
' verify_server_policy: ENFORCED',
' verify_server_properties: ALL',
'- fqdn: bad_bar.com',
' verify_server_policy: ENFORCED',
' verify_server_properties: ALL'
])
tr = Test.AddTestRun("Permissive-Test")
tr.Setup.Copy("ssl/signed-foo.key")
tr.Setup.Copy("ssl/signed-foo.pem")
tr.Setup.Copy("ssl/signed-bar.key")
tr.Setup.Copy("ssl/signed-bar.pem")
tr.Setup.Copy("ssl/wild-signed.pem")
tr.Setup.Copy("ssl/wild.key")
tr.Processes.Default.Command = "curl -v -k -H \"host: foo.com\" https://127.0.0.1:{0}".format(ts.Variables.ssl_port)
tr.ReturnCode = 0
# time delay as proxy.config.http.wait_for_cache could be broken
tr.Processes.Default.StartBefore(server_foo)
tr.Processes.Default.StartBefore(server_bar)
tr.Processes.Default.StartBefore(server)
tr.Processes.Default.StartBefore(server_wild)
tr.Processes.Default.StartBefore(Test.Processes.ts, ready=When.PortOpen(ts.Variables.ssl_port))
tr.StillRunningAfter = server
tr.StillRunningAfter = ts
tr.Processes.Default.Streams.stdout = Testers.ExcludesExpression("Could Not Connect", "Curl attempt should have succeeded")
tr2 = Test.AddTestRun("Override-enforcing-Test")
tr2.Processes.Default.Command = "curl -v -k -H \"host: bar.com\" https://127.0.0.1:{0}".format(ts.Variables.ssl_port)
tr2.ReturnCode = 0
tr2.StillRunningAfter = server
tr2.StillRunningAfter = ts
tr2.Processes.Default.Streams.stdout = Testers.ExcludesExpression("Could Not Connect", "Curl attempt should have succeeded")
tr3 = Test.AddTestRun("Override-enforcing-Test-fail-name-check")
tr3.Processes.Default.Command = "curl -v -k -H \"host: bad_bar.com\" https://127.0.0.1:{0}".format(ts.Variables.ssl_port)
tr3.Processes.Default.Streams.stdout = Testers.ContainsExpression("Could Not Connect", "Curl attempt should have failed")
tr3.ReturnCode = 0
tr3.StillRunningAfter = server
tr3.StillRunningAfter = ts
tr4 = Test.AddTestRun("Exercise-wildcard-cert-name-check")
tr4.Processes.Default.Command = "curl -v -k -H \"host: foo.wild.com\" https://127.0.0.1:{0}".format(ts.Variables.ssl_port)
tr4.Processes.Default.Streams.stdout = Testers.ExcludesExpression("Could Not Connect", "Curl attempt should have succeeded")
tr4.ReturnCode = 0
tr4.StillRunningAfter = server
tr4.StillRunningAfter = ts
tr5 = Test.AddTestRun("Exercise-wildcard-cert-underscore-name-check")
tr5.Processes.Default.Command = "curl -v -k -H \"host: foo_bar.wild.com\" https://127.0.0.1:{0}".format(ts.Variables.ssl_port)
tr5.Processes.Default.Streams.stdout = Testers.ExcludesExpression("Could Not Connect", "Curl attempt should have succeeded")
tr5.ReturnCode = 0
tr5.StillRunningAfter = server
tr5.StillRunningAfter = ts
# Overriding the built-in ERROR check since we expect tr3 to fail
ts.Disk.diags_log.Content = Testers.ExcludesExpression("verification failed", "Make sure the signatures didn't fail")
ts.Disk.diags_log.Content += Testers.ContainsExpression("WARNING: SNI \(bad_bar.com\) not in certificate", "Make sure bad_bar name checked failed.")
|
{
"content_hash": "c33223802f5204e62150e5fc852498c1",
"timestamp": "",
"source": "github",
"line_count": 157,
"max_line_length": 332,
"avg_line_length": 53.496815286624205,
"alnum_prop": 0.7313965948327181,
"repo_name": "chitianhao/trafficserver",
"id": "52571e6c5ccac7ce5ee91fe64b3cdc7bfca2d86f",
"size": "8399",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/gold_tests/tls/tls_verify.test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "1481933"
},
{
"name": "C++",
"bytes": "12818807"
},
{
"name": "CMake",
"bytes": "18505"
},
{
"name": "Dockerfile",
"bytes": "3283"
},
{
"name": "Java",
"bytes": "9881"
},
{
"name": "JavaScript",
"bytes": "1609"
},
{
"name": "Lex",
"bytes": "4029"
},
{
"name": "Lua",
"bytes": "39258"
},
{
"name": "M4",
"bytes": "187086"
},
{
"name": "Makefile",
"bytes": "195501"
},
{
"name": "Objective-C",
"bytes": "15182"
},
{
"name": "Perl",
"bytes": "110166"
},
{
"name": "Python",
"bytes": "705967"
},
{
"name": "Shell",
"bytes": "119499"
},
{
"name": "Vim script",
"bytes": "192"
},
{
"name": "Yacc",
"bytes": "3255"
},
{
"name": "sed",
"bytes": "131"
}
],
"symlink_target": ""
}
|
"""## Variables
@@Variable
## Variable helper functions
TensorFlow provides a set of functions to help manage the set of variables
collected in the graph.
@@global_variables
@@local_variables
@@model_variables
@@trainable_variables
@@moving_average_variables
@@global_variables_initializer
@@local_variables_initializer
@@variables_initializer
@@is_variable_initialized
@@report_uninitialized_variables
@@assert_variables_initialized
@@assign
@@assign_add
@@assign_sub
## Saving and Restoring Variables
@@Saver
@@latest_checkpoint
@@get_checkpoint_state
@@update_checkpoint_state
## Sharing Variables
TensorFlow provides several classes and operations that you can use to
create variables contingent on certain conditions.
@@get_variable
@@VariableScope
@@variable_scope
@@variable_op_scope
@@get_variable_scope
@@make_template
@@no_regularizer
@@constant_initializer
@@random_normal_initializer
@@truncated_normal_initializer
@@random_uniform_initializer
@@uniform_unit_scaling_initializer
@@zeros_initializer
@@ones_initializer
## Variable Partitioners for Sharding
@@fixed_size_partitioner
@@variable_axis_size_partitioner
@@min_max_variable_partitioner
## Sparse Variable Updates
The sparse update ops modify a subset of the entries in a dense `Variable`,
either overwriting the entries or adding / subtracting a delta. These are
useful for training embedding models and similar lookup-based networks, since
only a small subset of embedding vectors change in any given step.
Since a sparse update of a large tensor may be generated automatically during
gradient computation (as in the gradient of
[`tf.gather`](../../api_docs/python/array_ops.md#gather)),
an [`IndexedSlices`](#IndexedSlices) class is provided that encapsulates a set
of sparse indices and values. `IndexedSlices` objects are detected and handled
automatically by the optimizers in most cases.
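
A minimal sketch (shapes and values are illustrative only) of a sparse
in-place update:

```python
v = tf.Variable(tf.zeros([4, 3]))
# Adds 1.0 to every element of rows 0 and 2; rows 1 and 3 are untouched.
update = tf.scatter_add(v, [0, 2], tf.ones([2, 3]))
```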
@@scatter_update
@@scatter_add
@@scatter_sub
@@scatter_mul
@@scatter_div
@@scatter_nd_update
@@scatter_nd_add
@@scatter_nd_sub
@@sparse_mask
@@IndexedSlices
## Read-only Lookup Tables
@@initialize_all_tables
## Exporting and Importing Meta Graphs
@@export_meta_graph
@@import_meta_graph
# Deprecated functions (removed after 2017-03-02). Please don't use them.
@@all_variables
@@initialize_all_variables
@@initialize_local_variables
@@initialize_variables
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import common_shapes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import gen_state_ops
# go/tf-wildcard-import
# pylint: disable=wildcard-import
from tensorflow.python.ops.gen_state_ops import *
# pylint: enable=wildcard-import
# pylint: disable=protected-access
def variable_op(shape, dtype, name="Variable", set_shape=True, container="",
shared_name=""):
"""Create a variable Operation.
See also variables.Variable.
Args:
shape: The shape of the tensor managed by this variable
dtype: The underlying type of the tensor values.
name: optional name to use for the variable op.
set_shape: If True, set the shape property of the returned Tensor to
the shape argument.
container: An optional string. Defaults to "".
If non-empty, this variable is placed in the given container.
Otherwise, a default container is used.
shared_name: An optional string. Defaults to "".
If non-empty, this variable is named in the given bucket
with this shared_name. Otherwise, the node name is used instead.
Returns:
A variable tensor.
"""
if not set_shape:
shape = tensor_shape.unknown_shape()
ret = gen_state_ops._variable(shape=shape, dtype=dtype, name=name,
container=container, shared_name=shared_name)
# TODO(mrry): Move this to where it is used, so we can get rid of this op
# wrapper?
if set_shape:
ret.set_shape(shape)
return ret
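def _example_variable_op_usage():
  """A minimal usage sketch; the shape, dtype and pairing with
  init_variable (defined below) are illustrative assumptions only.
  """
  from tensorflow.python.framework import dtypes
  v = variable_op([2, 3], dtypes.float32, name="demo_v")
  init = init_variable(v, [[0.0] * 3] * 2)
  return v, init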
# NOTE(mrry): Shapes are conditionally set in the Python wrapper.
ops.RegisterShape("Variable")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("IsVariableInitialized")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("TemporaryVariable")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("DestroyTemporaryVariable")(common_shapes.call_cpp_shape_fn)
def init_variable(v, init, name="init"):
"""Initializes variable with "init".
This op does the following:
if init is a Tensor, v = init
if callable(init): v = init(VariableShape(v), v.dtype)
Args:
v: Variable to initialize
init: Tensor to assign to v,
Or an object convertible to Tensor e.g. nparray,
Or an Initializer that generates a tensor given the shape and type of v.
An "Initializer" is a callable that returns a tensor that "v" should be
set to. It will be called as init(shape, dtype).
name: Optional name for the op.
Returns:
The operation that initializes v.
"""
with ops.name_scope(None, v.op.name + "/", [v, init]):
with ops.name_scope(name) as scope:
with ops.colocate_with(v):
if callable(init):
assert v.get_shape().is_fully_defined(), "Variable shape unknown."
# TODO(mrry): Convert to v.shape when the property and
# accessor are reconciled (and all initializers support
# tf.TensorShape objects).
value = init(v.get_shape().as_list(), v.dtype.base_dtype)
value = ops.convert_to_tensor(value, name="value")
return gen_state_ops.assign(v, value, name=scope)
else:
init = ops.convert_to_tensor(init, name="init")
return gen_state_ops.assign(v, init, name=scope)
ops.RegisterShape("Assign")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("AssignAdd")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("AssignSub")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("CountUpTo")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("ScatterAdd")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("ScatterDiv")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("ScatterMul")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("ScatterSub")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("ScatterUpdate")(common_shapes.call_cpp_shape_fn)
@ops.RegisterShape("ScatterNdAdd")
@ops.RegisterShape("ScatterNdSub")
@ops.RegisterShape("ScatterNdMul")
@ops.RegisterShape("ScatterNdDiv")
@ops.RegisterShape("ScatterNdUpdate")
def scatter_nd_update_shape(op):
"""Shape function for the ScatterNd update ops."""
ref_shape = op.inputs[0].get_shape()
indices_shape = op.inputs[1].get_shape()
updates_shape = op.inputs[2].get_shape()
if indices_shape.ndims is not None and ref_shape.ndims is not None:
outer_dims = len(indices_shape) - 1
ixdim = indices_shape[-1].value or 0
if not indices_shape[:outer_dims].is_compatible_with(
updates_shape[:outer_dims]):
raise ValueError("The outer %d dimensions of indices.shape=%s must "
"match the outer %d dimensions of updates.shape=%s" % (
outer_dims, indices_shape, outer_dims,
updates_shape))
if not ref_shape[ixdim:].is_compatible_with(updates_shape[outer_dims:]):
raise ValueError("The inner %d dimensions of ref.shape=%s must match "
"the inner %d dimensions of updates.shape=%s" % (
len(ref_shape)-ixdim, ref_shape,
len(updates_shape)-outer_dims, updates_shape))
return [ref_shape]
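# Worked example of the checks above (comment-only): with ref.shape = [8],
# indices.shape = [4, 1] and updates.shape = [4], outer_dims = 1 and ixdim = 1;
# indices[:1] and updates[:1] are both [4], and ref[1:] and updates[1:] are
# both empty slices, so validation passes and the output shape is [8].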
|
{
"content_hash": "0b7792b1ec31b055b93e4c83e84cc9a9",
"timestamp": "",
"source": "github",
"line_count": 237,
"max_line_length": 79,
"avg_line_length": 32.16033755274262,
"alnum_prop": 0.716478614536867,
"repo_name": "jeffzheng1/tensorflow",
"id": "e196bdd3ff9999d89bdfcc73f8bbf56b825fe7a4",
"size": "8312",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tensorflow/python/ops/state_ops.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "2980"
},
{
"name": "C",
"bytes": "94360"
},
{
"name": "C++",
"bytes": "13843068"
},
{
"name": "CMake",
"bytes": "93604"
},
{
"name": "CSS",
"bytes": "774"
},
{
"name": "Go",
"bytes": "85550"
},
{
"name": "HTML",
"bytes": "525038"
},
{
"name": "Java",
"bytes": "56007"
},
{
"name": "JavaScript",
"bytes": "12235"
},
{
"name": "Jupyter Notebook",
"bytes": "1833475"
},
{
"name": "Makefile",
"bytes": "23468"
},
{
"name": "Objective-C",
"bytes": "7056"
},
{
"name": "Objective-C++",
"bytes": "64592"
},
{
"name": "Protocol Buffer",
"bytes": "142519"
},
{
"name": "Python",
"bytes": "13178852"
},
{
"name": "Shell",
"bytes": "262797"
},
{
"name": "TypeScript",
"bytes": "727019"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('products', '0058_bandsawblade_heading'),
]
operations = [
migrations.AddField(
model_name='bandsawblade',
name='cols',
field=models.CharField(blank=True, max_length=1024, verbose_name='Spalten'),
),
]
|
{
"content_hash": "cc6e621fe1f82405e5535bec0f352b1a",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 88,
"avg_line_length": 23.38888888888889,
"alnum_prop": 0.6128266033254157,
"repo_name": "n2o/guhema",
"id": "c21ec19e3b1c8edd2d802083f5a6d18baf79ea06",
"size": "493",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "products/migrations/0059_bandsawblade_cols.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "12685"
},
{
"name": "HTML",
"bytes": "158857"
},
{
"name": "Python",
"bytes": "151643"
}
],
"symlink_target": ""
}
|
import re
import ast
from setuptools import setup, find_packages
requires = [
'awscli>=1.16.10,<2.0.0',
'prompt-toolkit>=1.0.0,<1.1.0',
'boto3>=1.9.0,<2.0.0',
'configobj>=5.0.6,<6.0.0',
'Pygments>=2.1.3,<3.0.0',
]
with open('awsshell/__init__.py', 'r') as f:
version = str(
ast.literal_eval(
re.search(
r'__version__\s+=\s+(.*)',
f.read()).group(1)))
setup(
name='aws-shell',
version=version,
description='AWS Shell',
long_description=open('README.rst').read(),
author='James Saryerwinnie',
url='https://github.com/awslabs/aws-shell',
packages=find_packages(exclude=['tests*']),
include_package_data=True,
package_data={'awsshell': ['data/*/*.json',
'awsshellrc']},
install_requires=requires,
entry_points={
'console_scripts': [
'aws-shell = awsshell:main',
'aws-shell-mkindex = awsshell.makeindex:main',
]
},
license="Apache License 2.0",
classifiers=(
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'Natural Language :: English',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
),
)
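# After installation (e.g. `pip install aws-shell`), the entry points above
# expose two console scripts: `aws-shell` starts the interactive shell and
# `aws-shell-mkindex` runs awsshell.makeindex:main to (re)build the
# completion index.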
|
{
"content_hash": "d251ff51a45d6a09bda53bd8cbb7add4",
"timestamp": "",
"source": "github",
"line_count": 58,
"max_line_length": 61,
"avg_line_length": 29.25862068965517,
"alnum_prop": 0.5527401296405421,
"repo_name": "awslabs/aws-shell",
"id": "5696838833891aecbc31df2b985633fb7022eaef",
"size": "1719",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "1002"
},
{
"name": "Python",
"bytes": "154806"
}
],
"symlink_target": ""
}
|
from argparse import ArgumentParser
from os.path import dirname
from os.path import isfile
import sys
from docassemble.base.mako import exceptions
from docassemble.base.mako.lookup import TemplateLookup
from docassemble.base.mako.template import Template
def varsplit(var):
if "=" not in var:
return (var, "")
return var.split("=", 1)
def _exit():
sys.stderr.write(exceptions.text_error_template().render())
sys.exit(1)
def cmdline(argv=None):
parser = ArgumentParser()
parser.add_argument(
"--var",
default=[],
action="append",
help="variable (can be used multiple times, use name=value)",
)
parser.add_argument(
"--template-dir",
default=[],
action="append",
help="Directory to use for template lookup (multiple "
"directories may be provided). If not given then if the "
"template is read from stdin, the value defaults to be "
"the current directory, otherwise it defaults to be the "
"parent directory of the file provided.",
)
parser.add_argument(
"--output-encoding", default=None, help="force output encoding"
)
parser.add_argument(
"--output-file",
default=None,
help="Write to file upon successful render instead of stdout",
)
parser.add_argument("input", nargs="?", default="-")
options = parser.parse_args(argv)
output_encoding = options.output_encoding
output_file = options.output_file
if options.input == "-":
lookup_dirs = options.template_dir or ["."]
lookup = TemplateLookup(lookup_dirs)
try:
template = Template(
sys.stdin.read(),
lookup=lookup,
output_encoding=output_encoding,
)
except:
_exit()
else:
filename = options.input
if not isfile(filename):
raise SystemExit("error: can't find %s" % filename)
lookup_dirs = options.template_dir or [dirname(filename)]
lookup = TemplateLookup(lookup_dirs)
try:
template = Template(
filename=filename,
lookup=lookup,
output_encoding=output_encoding,
)
except:
_exit()
kw = dict(varsplit(var) for var in options.var)
try:
rendered = template.render(**kw)
except:
_exit()
else:
if output_file:
open(output_file, "wt", encoding=output_encoding).write(rendered)
else:
sys.stdout.write(rendered)
if __name__ == "__main__":
cmdline()
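# Example invocations (file and variable names are hypothetical):
#   python -m docassemble.base.mako.cmd --var name=World hello.mako
#   cat hello.mako | python -m docassemble.base.mako.cmd --var name=World -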
|
{
"content_hash": "9b23aa58846bc93b97569a915657e8ae",
"timestamp": "",
"source": "github",
"line_count": 95,
"max_line_length": 77,
"avg_line_length": 27.91578947368421,
"alnum_prop": 0.5871040723981901,
"repo_name": "jhpyle/docassemble",
"id": "acaea652779af39653eb93aae85777902a5ecd38",
"size": "2865",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "docassemble_base/docassemble/base/mako/cmd.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "156"
},
{
"name": "CSS",
"bytes": "87682"
},
{
"name": "Dockerfile",
"bytes": "6731"
},
{
"name": "Emacs Lisp",
"bytes": "3393"
},
{
"name": "Gherkin",
"bytes": "518252"
},
{
"name": "HTML",
"bytes": "176555"
},
{
"name": "JavaScript",
"bytes": "1903341"
},
{
"name": "Mako",
"bytes": "1042"
},
{
"name": "Python",
"bytes": "4402435"
},
{
"name": "Rich Text Format",
"bytes": "120028"
},
{
"name": "Shell",
"bytes": "146777"
},
{
"name": "TeX",
"bytes": "15582"
}
],
"symlink_target": ""
}
|
import sys
from utils.cli import shell
from utils.notify import info, error
import config
"""Sets up the necessary AWS configurations
Creates repository if it doesnt exists
"""
def get_repository_uri(repository):
"""Checks if a repository is created already
Arguments:
repository {string} -- Name of the repository
Returns:
mixed -- Repository URI or None if not found
"""
    res = shell('aws ecr describe-repositories --output text --repository-names %s' % repository)
lines = res.split('\n')
repo = None
for line in lines:
if '/' + repository in line:
repo = line
break
if not repo:
return None
    return repo.split('\t')[-1]
def create_repository(repository):
"""Creates ECR repository
Arguments:
repository {string} -- Name of the repository
Returns:
string -- Repository URI
"""
return shell('aws ecr create-repository --repository-name %s --output text' % repository
).split('\t')[-1]
try:
info('Starting deploy!')
    # Create the repository if it doesn't exist already
uri = get_repository_uri(config.PROJECT)
if not uri:
uri = create_repository(config.PROJECT)
info('Repository %s created' % config.PROJECT)
config.REPOSITORY = uri
except Exception, ex:
error('Could not create repository', ex)
sys.exit(2)
|
{
"content_hash": "ae5688470bd3a5939ff3c078da04cad4",
"timestamp": "",
"source": "github",
"line_count": 60,
"max_line_length": 101,
"avg_line_length": 23.15,
"alnum_prop": 0.652267818574514,
"repo_name": "JoelRoxell/heimdall",
"id": "02ded6fe0e46ef59d271c2c675845de62d47cb91",
"size": "1413",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "deploy/ecs/setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Groovy",
"bytes": "238"
},
{
"name": "JavaScript",
"bytes": "21243"
},
{
"name": "Python",
"bytes": "12137"
},
{
"name": "Shell",
"bytes": "1877"
}
],
"symlink_target": ""
}
|
import datetime
import re
import unicodedata
import pytz
from dontbelate import settings
MAX_DEEPLINK_LENGTH = 40
def generate_slug(s_in):
s = unicode(s_in)
s = unicodedata.normalize('NFD', s).encode('ascii', 'ignore')
s = re.sub(r'[^a-zA-Z0-9\ \-_]', '', s.lower()) # strip non alphanumeric
    s = re.sub(r'[-\s]+', '-', s)  # collapse spaces and repeated hyphens into single hyphens
s = s[:MAX_DEEPLINK_LENGTH]
if s.isdigit():
s = "s_%s" % s
return s
def valid_slug(value):
    match = re.match(r'^[-a-zA-Z0-9_]+\Z', value)
return bool(match)
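# Comment-only examples of the helpers above (inputs are illustrative):
#   generate_slug(u'Hello, World!')  ->  'hello-world'
#   generate_slug(u'12345')          ->  's_12345' (digit-only slugs get prefixed)
#   valid_slug('hello-world')        ->  True
#   valid_slug('hello world')        ->  False (whitespace is not allowed)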
def local_to_utc(date_time):
"""
Convert local datetime string to UTC
Remove tzinfo to be able to save to Datastore right away
Args:
date_time: str
Returns:
naive utc datetime.datetime
"""
dt = datetime.datetime.strptime(date_time, '%Y-%m-%d %H:%M')
tz = pytz.timezone(settings.DEFAULT_TIME_ZONE)
local_dt = tz.localize(dt)
return local_dt.astimezone(pytz.utc).replace(microsecond=0, tzinfo=None)
def utc_to_local(date_time):
"""
Convert UTC datetime to local datetime
Args:
date_time: utc datetime.datetime
Returns:
naive local datetime.datetime
"""
tz = pytz.timezone(settings.DEFAULT_TIME_ZONE)
local_dt = date_time.replace(tzinfo=pytz.utc, microsecond=0).astimezone(tz)
return local_dt.replace(tzinfo=None)
|
{
"content_hash": "137e202d6c9b470e64b9ca4e542f8ebf",
"timestamp": "",
"source": "github",
"line_count": 58,
"max_line_length": 79,
"avg_line_length": 24.93103448275862,
"alnum_prop": 0.6224066390041494,
"repo_name": "svleeuwen/dont-be-late-appengine",
"id": "e9b59157b56029a40755c30597f364eff9ae61b3",
"size": "1446",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/generic/utils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2686"
},
{
"name": "HTML",
"bytes": "168"
},
{
"name": "JavaScript",
"bytes": "6906"
},
{
"name": "Python",
"bytes": "85578"
},
{
"name": "Shell",
"bytes": "4849"
},
{
"name": "Smarty",
"bytes": "15026"
}
],
"symlink_target": ""
}
|
from __future__ import division
from __future__ import print_function
import multiprocessing as mp
import time
from pysabertooth import Sabertooth
from smc import SMC
from pygecko import ZmqClass as zmq
# from pygecko import Messages as Msg
class Motion(mp.Process):
def __init__(self):
mp.Process.__init__(self)
def run(self):
smc = SMC('/dev/tty.usbserial0')
smc.init()
smc.stop() # make sure we are stopped?
saber = Sabertooth('/dev/tty.usbserial1')
saber.stop() # make sure we are stopped?
# pub = zmq.Pub() # do i need to feedback motor errors?
sub = zmq.Sub(['cmd', 'dome'])
while True:
topic, msg = sub.recv()
if msg:
if topic == 'cmd':
print('got cmd')
elif topic == 'dome':
print('got dome')
else:
time.sleep(0.5)
def main():
print('--- Starting Motion ---')
motion = Motion()
try:
motion.start()
except KeyboardInterrupt:
print('<<<<<<<< keyboard >>>>>>>>>>>')
motion.join()
motion.terminate()
if __name__ == "__main__":
main()
|
{
"content_hash": "201de9e9f1bac3eb764ad1ce1598a3be",
"timestamp": "",
"source": "github",
"line_count": 52,
"max_line_length": 58,
"avg_line_length": 19.576923076923077,
"alnum_prop": 0.6286836935166994,
"repo_name": "DFEC-R2D2/r2d2",
"id": "6e046f1efd8e69572d7fc3bcfb53eebc78eb35fc",
"size": "1041",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "archives/old_2017-2018_code/code/gecko/Motion.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "77068"
},
{
"name": "HTML",
"bytes": "60835"
},
{
"name": "JavaScript",
"bytes": "22044"
},
{
"name": "Python",
"bytes": "319317"
},
{
"name": "Shell",
"bytes": "2422"
}
],
"symlink_target": ""
}
|
from abc import abstractmethod
import copy
import re
from sqlalchemy import Float, Integer, text, exists, select, or_
from sqlalchemy.dialects.postgresql import JSONB
from sqlalchemy.sql import column
from sqlalchemy.sql.operators import is_
from dart.context.locator import injectable
from dart.model.exception import DartValidationException
from dart.model.query import Operator, Filter
@injectable
class FilterService(object):
def __init__(self):
self._operator_handlers = {
Operator.EQ: OperatorEquals(),
Operator.NE: OperatorNotEquals(),
Operator.LT: OperatorLessThan(),
Operator.LE: OperatorLessThanOrEquals(),
Operator.GT: OperatorGreaterThan(),
Operator.GE: OperatorGreaterThanOrEquals(),
Operator.IN: OperatorIn(),
Operator.NOT_LIKE: OperatorNotLike(),
Operator.LIKE: OperatorLike(),
Operator.SEARCH: OperatorSearch(),
}
def from_string(self, f_string):
        pattern = re.compile(r'\s*(\S+?)\s*(' + '|'.join(self._operator_handlers.keys()) + r')\s*(\S+)\s*')
m = pattern.match(f_string)
try:
return Filter(m.group(1), m.group(2), m.group(3))
except:
raise DartValidationException('could not parse filter: %s' % f_string)
def apply_filter(self, f, query, dao, schemas):
""" :type f: dart.model.query.Filter """
op = self._operator_handlers[f.operator]
if f.key in ['id', 'created', 'updated']:
return query.filter(op.evaluate(lambda v: v, getattr(dao, f.key), str, f.value))
# at this point, assume we are dealing with a data/JSONB filter
path_keys = f.key.split('.')
filters = []
visited = {}
for schema in schemas:
type_, array_indexes = self._get_type(path_keys, schema)
identifier = type_ + '@' + str(array_indexes)
if identifier in visited:
continue
visited[identifier] = 1
key_groups = self.get_key_groups(array_indexes, path_keys)
last_is_array = array_indexes[-1] == len(path_keys) - 1 if len(array_indexes) > 0 else False
filters.append(self.expr(0, 'data', dao.data, key_groups, type_, f.value, op, last_is_array))
return query.filter(filters[0]) if len(filters) == 1 else query.filter(or_(*filters))
def expr(self, i, alias, col, key_groups, t, v, op, last_is_array):
if i < len(key_groups) - 1:
subq, c = self.get_subquery(alias, i, key_groups)
subq = subq.where(self.expr(i + 1, 'dart_a_%s.value' % i, c, key_groups, t, v, op, last_is_array))
return exists(subq)
if last_is_array:
subq, c = self.get_subquery(alias, i, key_groups, True)
subq = subq.where(op.evaluate(lambda x: x, c, _python_cast(t), v))
return exists(subq)
return op.evaluate(_pg_cast(t), col[key_groups[i]], _python_cast(t), v)
@staticmethod
def get_subquery(alias, i, key_groups, as_text=False):
c = column('value', JSONB)
bindvars = {'dart_var_%s' % i: '{' + ','.join(key_groups[i]) + '}'}
suffix = '_text' if as_text else ''
from_expr = text('jsonb_array_elements%s(%s #> :dart_var_%s) as dart_a_%s' % (suffix, alias, i, i))
from_expr = from_expr.bindparams(**bindvars)
subq = select([c]).select_from(from_expr)
return subq, c
@staticmethod
def get_key_groups(array_indexes, path_keys):
if len(array_indexes) <= 0 or len(path_keys) == 1:
return [path_keys]
array_groups = []
prev = 0
for i in array_indexes:
array_groups.append(path_keys[prev:i + 1])
prev = i + 1
array_groups.append(path_keys[prev:])
return array_groups
@staticmethod
def _get_type(path_keys, schema):
array_indexes = []
s = schema['properties']['data']
for i, key in enumerate(path_keys):
if not s:
break
if 'object' in s['type']:
s = s['properties'].get(key)
continue
if 'array' in s['type']:
array_indexes.append(i - 1)
s = s['items'].get('properties', {}).get(key)
continue
if not s:
return 'string', array_indexes
type_ = s['type']
if isinstance(type_, list):
pt_copy = copy.copy(type_)
pt_copy.remove('null')
type_ = pt_copy[0]
if type_ == 'array':
array_indexes.append(len(path_keys) - 1)
return type_, array_indexes
class OperatorEvaluator(object):
@abstractmethod
def evaluate(self, lhs_cast, lhs, rhs_cast, rhs):
raise NotImplementedError
class OperatorEquals(OperatorEvaluator):
def evaluate(self, lhs_cast, lhs, rhs_cast, rhs):
return lhs_cast(lhs) == rhs_cast(rhs)
class OperatorNotEquals(OperatorEvaluator):
def evaluate(self, lhs_cast, lhs, rhs_cast, rhs):
return or_(lhs_cast(lhs) != rhs_cast(rhs), is_(lhs_cast(lhs), None))
class OperatorLessThan(OperatorEvaluator):
def evaluate(self, lhs_cast, lhs, rhs_cast, rhs):
return lhs_cast(lhs) < rhs_cast(rhs)
class OperatorLessThanOrEquals(OperatorEvaluator):
def evaluate(self, lhs_cast, lhs, rhs_cast, rhs):
return lhs_cast(lhs) <= rhs_cast(rhs)
class OperatorGreaterThan(OperatorEvaluator):
def evaluate(self, lhs_cast, lhs, rhs_cast, rhs):
return lhs_cast(lhs) > rhs_cast(rhs)
class OperatorGreaterThanOrEquals(OperatorEvaluator):
def evaluate(self, lhs_cast, lhs, rhs_cast, rhs):
return lhs_cast(lhs) >= rhs_cast(rhs)
class OperatorIn(OperatorEvaluator):
def evaluate(self, lhs_cast, lhs, rhs_cast, rhs):
return lhs_cast(lhs).in_([rhs_cast(v) for v in rhs.split(',')])
class OperatorNotLike(OperatorEvaluator):
def evaluate(self, lhs_cast, lhs, rhs_cast, rhs):
return lhs_cast(lhs).notilike(rhs_cast(rhs))
class OperatorLike(OperatorEvaluator):
def evaluate(self, lhs_cast, lhs, rhs_cast, rhs):
return lhs_cast(lhs).ilike(rhs_cast(rhs))
class OperatorSearch(OperatorEvaluator):
def evaluate(self, lhs_cast, lhs, rhs_cast, rhs):
only_alphanum = re.sub(r'\W+', '', rhs)
search_string = '%' + '%'.join(only_alphanum) + '%'
return lhs_cast(lhs).ilike(search_string)
def _pg_cast(js_type):
if js_type == 'integer':
return lambda v: v.cast(Integer)
if js_type == 'number':
return lambda v: v.cast(Float)
return lambda v: v.astext
def _python_cast(js_type):
if js_type == 'integer':
return int
if js_type == 'number':
return float
return str
|
{
"content_hash": "5f516a0e4303498affda531889c3dea6",
"timestamp": "",
"source": "github",
"line_count": 194,
"max_line_length": 110,
"avg_line_length": 35.118556701030926,
"alnum_prop": 0.5953324526640247,
"repo_name": "RetailMeNotSandbox/dart",
"id": "575d843f17c55f41df506f3efff6a2671ce99c8d",
"size": "6813",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/python/dart/service/filter.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "103727"
},
{
"name": "HTML",
"bytes": "67636"
},
{
"name": "JavaScript",
"bytes": "2762304"
},
{
"name": "Nginx",
"bytes": "996"
},
{
"name": "PLpgSQL",
"bytes": "1475"
},
{
"name": "Python",
"bytes": "1025954"
},
{
"name": "Ruby",
"bytes": "5523"
},
{
"name": "Shell",
"bytes": "3100"
}
],
"symlink_target": ""
}
|
from imagequery import Format, register_format, NewImageQuery
class BackendPreviewThumbnail(Format):
def execute(self, query):
return query.scale(700, 200).query_name('backend_preview_thumbnail')
register_format('backend_preview_thumbnail', BackendPreviewThumbnail)
|
{
"content_hash": "102e99c2dc7401d538b1edc556c8cb9c",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 76,
"avg_line_length": 40,
"alnum_prop": 0.7857142857142857,
"repo_name": "team23/django_backend",
"id": "2523f72a19c45c337380b3fa5c6ee3c8597ffe75",
"size": "280",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "django_backend/image_formats.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "70141"
},
{
"name": "Groff",
"bytes": "160"
},
{
"name": "HTML",
"bytes": "220610"
},
{
"name": "JavaScript",
"bytes": "707306"
},
{
"name": "Python",
"bytes": "168182"
},
{
"name": "Ruby",
"bytes": "1515"
},
{
"name": "Shell",
"bytes": "53"
}
],
"symlink_target": ""
}
|
r"""Goal-Finding and clustering combined task.
To demo this task, navigate to the main directory and run the following:
'''
$ python demo --config=spriteworld.configs.examples.goal_finding_clustering \
--task_hsv_colors=False
'''
This is a complicated task designed only to exemplify the features of the task
specification procedures.
In this task there are three kinds of sprites:
1) Those to be clustered. These are triangles, squares, and pentagons. They must
be clustered according to their color.
2) Those to be brought to goal regions. These are 4-spokes and 4-stars. They
must be brought to different sides of the arena according to their color.
Namely, the reddish ones must be brought to the right side of the arena and the
greenish ones must be brought to the left side of the arena (the y-position is
irrelevant).
3) Distractors. These are circles.
There is a train/test split: In test mode, the colors of the objects to be
clustered and the scales of those to be brought to goals are different.
Note that the colors in this task are defined in RGB space, so be sure when
running the demo on it to set --task_hsv_colors=False.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import numpy as np
from spriteworld import action_spaces
from spriteworld import factor_distributions as distribs
from spriteworld import renderers as spriteworld_renderers
from spriteworld import sprite_generators
from spriteworld import tasks
def get_config(mode='train'):
"""Generate environment config.
Args:
mode: Unused task mode.
Returns:
config: Dictionary defining task/environment configuration. Can be fed as
kwargs to environment.Environment.
"""
# Factor distributions common to all objects.
common_factors = distribs.Product([
distribs.Continuous('x', 0.1, 0.9),
distribs.Continuous('y', 0.1, 0.9),
distribs.Continuous('angle', 0, 360, dtype='int32'),
])
# train/test split for goal-finding object scales and clustering object colors
goal_finding_scale_test = distribs.Continuous('scale', 0.08, 0.12)
green_blue_colors = distribs.Product([
distribs.Continuous('c1', 64, 256, dtype='int32'),
distribs.Continuous('c2', 64, 256, dtype='int32'),
])
if mode == 'train':
goal_finding_scale = distribs.SetMinus(
distribs.Continuous('scale', 0.05, 0.15),
goal_finding_scale_test,
)
cluster_colors = distribs.Product(
[distribs.Continuous('c0', 128, 256, dtype='int32'), green_blue_colors])
elif mode == 'test':
goal_finding_scale = goal_finding_scale_test
cluster_colors = distribs.Product(
[distribs.Continuous('c0', 0, 128, dtype='int32'), green_blue_colors])
else:
raise ValueError(
'Invalid mode {}. Mode must be "train" or "test".'.format(mode))
# Create clustering sprite generators
sprite_gen_list = []
cluster_shapes = [
distribs.Discrete('shape', [s])
for s in ['triangle', 'square', 'pentagon']
]
for shape in cluster_shapes:
factors = distribs.Product([
common_factors,
cluster_colors,
shape,
distribs.Continuous('scale', 0.08, 0.12),
])
sprite_gen_list.append(
sprite_generators.generate_sprites(factors, num_sprites=2))
# Create goal-finding sprite generators
goal_finding_colors = [
distribs.Product([
distribs.Continuous('c0', 192, 256, dtype='int32'),
distribs.Continuous('c1', 0, 128, dtype='int32'),
distribs.Continuous('c2', 64, 128, dtype='int32'),
]),
distribs.Product([
distribs.Continuous('c0', 0, 128, dtype='int32'),
distribs.Continuous('c1', 192, 256, dtype='int32'),
distribs.Continuous('c2', 64, 128, dtype='int32'),
])
]
# Goal positions corresponding to the colors in goal_finding_colors
goal_finding_positions = [(0., 0.5), (1., 0.5)]
goal_finding_shapes = distribs.Discrete('shape', ['spoke_4', 'star_4'])
for colors in goal_finding_colors:
factors = distribs.Product([
common_factors,
goal_finding_scale,
goal_finding_shapes,
colors,
])
sprite_gen_list.append(
sprite_generators.generate_sprites(
factors, num_sprites=lambda: np.random.randint(1, 3)))
# Create distractor sprite generator
distractor_factors = distribs.Product([
common_factors,
distribs.Discrete('shape', ['circle']),
distribs.Continuous('c0', 64, 256, dtype='uint8'),
distribs.Continuous('c1', 64, 256, dtype='uint8'),
distribs.Continuous('c2', 64, 256, dtype='uint8'),
distribs.Continuous('scale', 0.08, 0.12),
])
sprite_gen_list.append(sprite_generators.generate_sprites(
distractor_factors, num_sprites=lambda: np.random.randint(0, 3)))
# Concat clusters into single scene to generate
sprite_gen = sprite_generators.chain_generators(*sprite_gen_list)
# Randomize sprite ordering to eliminate any task information from occlusions
sprite_gen = sprite_generators.shuffle(sprite_gen)
# Create the combined task of goal-finding and clustering
task_list = []
task_list.append(
tasks.Clustering(cluster_shapes, terminate_bonus=0., reward_range=10.))
for colors, goal_pos in zip(goal_finding_colors, goal_finding_positions):
goal_finding_task = tasks.FindGoalPosition(
distribs.Product([colors, goal_finding_shapes]),
goal_position=goal_pos,
weights_dimensions=(1, 0),
terminate_distance=0.15,
raw_reward_multiplier=30)
task_list.append(goal_finding_task)
task = tasks.MetaAggregated(
task_list, reward_aggregator='sum', termination_criterion='all')
renderers = {
'image':
spriteworld_renderers.PILRenderer(
image_size=(64, 64), anti_aliasing=5)
}
config = {
'task': task,
'action_space': action_spaces.SelectMove(scale=0.5),
'renderers': renderers,
'init_sprites': sprite_gen,
'max_episode_length': 50,
'metadata': {
'name': os.path.basename(__file__),
'mode': mode
}
}
return config
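# Usage sketch (follows the docstring's note that the config can be fed as
# kwargs to environment.Environment; the import below is an assumption):
#   from spriteworld import environment
#   env = environment.Environment(**get_config('train'))
#   timestep = env.reset()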
|
{
"content_hash": "bb5190ba4f9f8de4fc34214f75120f0d",
"timestamp": "",
"source": "github",
"line_count": 173,
"max_line_length": 80,
"avg_line_length": 35.664739884393065,
"alnum_prop": 0.6774716369529984,
"repo_name": "deepmind/spriteworld",
"id": "22fb425ee3f0e07d1e40f74ea55777ea8b2e06ad",
"size": "6861",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "spriteworld/configs/examples/goal_finding_clustering.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "202192"
}
],
"symlink_target": ""
}
|
__author__ = 'walter'
import requests
from xml.etree import ElementTree as ET
OCS_ADMIN = 'ocs/v1.php/cloud'
class StatusCodeException(Exception):
def __init__(self,status,statusCode,message):
self.status = status
self.statusCode = statusCode
self.message = message
    def __str__(self):
        errMsg = self.status
        if self.message != '':
            errMsg += ': ' + self.message
        return repr(errMsg)
class Response(object):
def __init__(self,textResponse):
#init parse
self.__resxml = ET.fromstring(textResponse)
self.__meta = self.__resxml.find('meta')
self.__status = self.__meta.find('status').text
self.__statuscode = self.__meta.find('statuscode').text
if self.__meta.find('message') is not None:
self.__message = self.__meta.find('message').text
@property
def status(self):
return self.__status
@property
def statuscode(self):
return int(self.__statuscode)
@property
def message(self):
        # name mangling stores the attribute as _Response__message
        return getattr(self, '_Response__message', '')
def getElementData(self):
data = self.__resxml.find('data')
return data
def getData(self):
pass
class Client(object):
def __init__(self,url,username, password,**kwargs):
if not url[-1] == '/':
url = url + '/'
self.__ocs_admin = kwargs.get('ocs_admin', OCS_ADMIN)
self.url = url + self.__ocs_admin
self.__auth = (username,password)
self.__debug = kwargs.get('debug', False)
    def __cast(self, tag):
        if tag in ['displayname', 'email', 'quota']:
            return str
        elif tag in ['enabled']:
            # the API returns 'true'/'false' strings; bool() alone would map 'false' to True
            return lambda v: v == 'true'
        # fall back to the raw string for any other tag
        return str
def __compileUrl(self,apiName,*args,**kwargs):
url = self.url + '/' + apiName
if len(args) > 0:
url += '/' + '/'.join(args)
return url
def __makeRequest(self,apiName,*args,**kwargs):
s = requests.Session()
reqMethod = kwargs.get('method','GET')
reqData = kwargs.get('data',{})
req = requests.Request(reqMethod,self.__compileUrl(apiName,*args,**kwargs),
auth=self.__auth,
data=reqData
)
r = s.send(req.prepare())
r.raise_for_status()
res = Response(r.text)
if res.statuscode != 100:
raise StatusCodeException(res.status,res.statuscode,res.message)
return res
@property
def auth(self):
return self.__auth
def getUsers(self):
toRet = []
res = self.__makeRequest('users')
data = res.getElementData()
for u in list(data.find('users')):
toRet.append(u.text)
return toRet
def getUser(self,userName):
toRet = {}
res = self.__makeRequest('users',userName)
data = res.getElementData()
for u in list(data):
toRet[u.tag] = self.__cast(u.tag)(u.text)
return toRet
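# Usage sketch (URL and credentials are placeholders, not real endpoints):
#
#   client = Client('https://cloud.example.com/', 'admin', 'secret')
#   print(client.getUsers())          # -> list of user names
#   print(client.getUser('walter'))   # -> dict of user attributes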
|
{
"content_hash": "ee7b9fd95fcbc779ba2117389224271c",
"timestamp": "",
"source": "github",
"line_count": 111,
"max_line_length": 83,
"avg_line_length": 28.0990990990991,
"alnum_prop": 0.5405578711125361,
"repo_name": "wlorenzetti/owncloudAdminClient",
"id": "af8a7cf8144c494338d53766036f441ad6783666",
"size": "3119",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "owncloudadmin/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "5065"
}
],
"symlink_target": ""
}
|
import sys
sys.path.insert(0,'../')
sys.path.insert(0,'./')
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "annotation.settings")
from markpic.models import KeyWord5K,Picture5K
words_list=[]
for line in open("../markpic/words"):
line=line[:-1]
words_list.append(line)
imagename_list=[]
for line in open("./image_nums"):
line=line[:-1]
line=line+".jpeg"
imagename_list.append(line)
wordsrelation_list=[]
wordsrelation=[]
for line in open("document_words"):
line=line[:line.find("-99")]
wordsrelation=line.split()
wordsrelation_list.append(wordsrelation)
#print wordsrelation
print len(wordsrelation)
#from markpic.models import PictureCorel,KeyWordCorel
for word in words_list:
worddata=KeyWord5K(keyname=word)
worddata.save()
#print imagename_list
for imagename in imagename_list:
# print imagename
pic=Picture5K.objects.get(picname=imagename)
if pic.picid>=len(wordsrelation_list):
continue
for word in wordsrelation_list[pic.picid]:
keyword=KeyWord5K.objects.get(keyid=word)
picture=Picture5K.objects.get(picname=imagename)
keyword.pictures.add(picture)
# keyword=KeyWordCorel(keyname=word,picture=pic.picid)
|
{
"content_hash": "f112bbbcdf1f9c4353d8a0684e46412d",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 70,
"avg_line_length": 27.177777777777777,
"alnum_prop": 0.7162714636140638,
"repo_name": "matrixorz/justpic",
"id": "fd6943033eb92d476c7defa2af9a21b0ad7310a6",
"size": "1259",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "justpic/etc/tools.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "2609566"
},
{
"name": "C++",
"bytes": "12154"
},
{
"name": "CSS",
"bytes": "123382"
},
{
"name": "D",
"bytes": "10427"
},
{
"name": "Frege",
"bytes": "6414"
},
{
"name": "JavaScript",
"bytes": "350048"
},
{
"name": "Python",
"bytes": "215536"
}
],
"symlink_target": ""
}
|
import argparse
import os
import shlex
import subprocess
import sys
gopath = os.environ['GOPATH']
integration_command = """docker run --rm \
-e "GOPATH=/gopath" \
-v """ + gopath + """:/gopath \
-w /gopath/src/github.com/amadeovezz/gobro/db \
--network=db_default \
golang:1.7.1 \
go test -v"""
def run_command(command):
split_command = shlex.split(command)
process = subprocess.Popen(split_command, stdout=subprocess.PIPE)
for line in iter(process.stdout.readline, ''):
sys.stdout.write(line)
def start_compose_environment():
print "Spinning up mysql container for testing:\n"
run_command("docker-compose -f " + gopath + "/src/github.com/amadeovezz/gobro/db/docker-compose.yml up -d")
def stop_compose_environment():
print "Taking down mysql container:\n"
run_command("docker-compose -f " + gopath + "/src/github.com/amadeovezz/gobro/db/docker-compose.yml down")
def run_integration_test():
print "\nRunning database integration tests:\n"
print "------------------------------------------"
run_command(integration_command)
print "------------------------------------------\n"
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Test db package')
parser.add_argument('--up', action="store_true", help='bring up docker compose environment')
parser.add_argument('--down', action="store_true", help='take down docker compose environment')
parser.add_argument('--full', action="store_true", help='run test suite from scratch and clean up after')
parser.add_argument('--test', action="store_true", help='run test suite assuming compose environment is up')
args = parser.parse_args()
if args.up:
start_compose_environment()
elif args.down:
stop_compose_environment()
elif args.full:
start_compose_environment()
run_integration_test()
stop_compose_environment()
elif args.test:
run_integration_test()
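# Example invocations (run from the repo, paths assumed):
#   python db/test.py --full   # compose up, run tests, compose down
#   python db/test.py --up     # only start the mysql container
#   python db/test.py --test   # run tests against a running environment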
|
{
"content_hash": "3794fbbfb77ea5aa511e072c1f77531f",
"timestamp": "",
"source": "github",
"line_count": 57,
"max_line_length": 112,
"avg_line_length": 36.175438596491226,
"alnum_prop": 0.6231813773035888,
"repo_name": "amadeovezz/gobro",
"id": "f03152646f110bfd41aa36a6d4e31815b4004ccf",
"size": "2086",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "db/test.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Go",
"bytes": "19555"
},
{
"name": "Python",
"bytes": "7298"
}
],
"symlink_target": ""
}
|
'''
A Tornado-inspired logging formatter, with displayed time with millisecond accuracy
FYI: pyftpdlib also has a Tornado-style logger.
'''
import sys
import time
import logging
class Colors:
def __init__(self, color=None):
if color is None:
color = support_color()
if color:
import curses
curses.setupterm()
if sys.hexversion < 0x30203f0:
fg_color = str(curses.tigetstr("setaf") or
curses.tigetstr("setf") or "", "ascii")
else:
fg_color = curses.tigetstr("setaf") or curses.tigetstr("setf") or b""
self.blue = str(curses.tparm(fg_color, 4), "ascii")
self.yellow = str(curses.tparm(fg_color, 3), "ascii")
self.green = str(curses.tparm(fg_color, 2), "ascii")
self.red = str(curses.tparm(fg_color, 1), "ascii")
self.bright_red = str(curses.tparm(fg_color, 9), "ascii")
self.normal = str(curses.tigetstr("sgr0"), "ascii")
else:
self.blue = self.yellow = self.green = self.red = self.bright_red = self.normal = ""
class TornadoLogFormatter(logging.Formatter):
def __init__(self, color, *args, **kwargs):
super().__init__(*args, **kwargs)
self._color = color
if color:
colors = Colors(color=color)
self._colors = {
logging.DEBUG: colors.blue,
logging.INFO: colors.green,
logging.WARNING: colors.yellow,
logging.ERROR: colors.red,
logging.CRITICAL: colors.bright_red,
}
self._normal = colors.normal
def format(self, record):
try:
record.message = record.getMessage()
except Exception as e:
record.message = "Bad message (%r): %r" % (e, record.__dict__)
record.asctime = time.strftime(
"%m-%d %H:%M:%S", self.converter(record.created))
prefix = '[%(levelname)1.1s %(asctime)s.%(msecs)03d %(module)s:%(lineno)d]' % \
record.__dict__
if self._color:
prefix = (self._colors.get(record.levelno, self._normal) +
prefix + self._normal)
formatted = prefix + " " + record.message
formatted += ''.join(
' %s=%s' % (k, v) for k, v in record.__dict__.items()
if k not in {
'levelname', 'asctime', 'module', 'lineno', 'args', 'message',
'filename', 'exc_info', 'exc_text', 'created', 'funcName',
'processName', 'process', 'msecs', 'relativeCreated', 'thread',
'threadName', 'name', 'levelno', 'msg', 'pathname', 'stack_info',
})
if record.exc_info:
if not record.exc_text:
record.exc_text = self.formatException(record.exc_info)
if record.exc_text:
formatted = formatted.rstrip() + "\n" + record.exc_text
return formatted.replace("\n", "\n ")
def support_color(stream=sys.stderr):
if stream.isatty():
try:
import curses
curses.setupterm()
if curses.tigetnum("colors") > 0:
return True
except:
import traceback
traceback.print_exc()
return False
def enable_pretty_logging(level=logging.DEBUG, handler=None, color=None):
'''
handler: specify a handler instead of default StreamHandler
color: boolean, force color to be on / off. Default to be on only when
``handler`` isn't specified and the term supports color
'''
logger = logging.getLogger()
if handler is None:
h = logging.StreamHandler()
else:
h = handler
if color is None and handler is None:
color = support_color()
formatter = TornadoLogFormatter(color=color)
h.setLevel(level)
h.setFormatter(formatter)
logger.setLevel(level)
logger.addHandler(h)
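# Minimal usage sketch (the import path follows this file's location and the
# call below is an assumption about how a consumer wires it up):
#
#   import logging
#   from nvchecker.lib.nicelogger import enable_pretty_logging
#   enable_pretty_logging(level=logging.INFO)
#   logging.info('started', extra={'pkg': 'demo'})  # extra fields are appended as k=v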
|
{
"content_hash": "7500890bb783476438b9ee94c2b1e1a6",
"timestamp": "",
"source": "github",
"line_count": 109,
"max_line_length": 90,
"avg_line_length": 32.60550458715596,
"alnum_prop": 0.6178953292065279,
"repo_name": "lilydjwg/nvchecker",
"id": "5f586bca8f3d9a372dc8e9555af63422d245cf6a",
"size": "3634",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nvchecker/lib/nicelogger.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "146836"
}
],
"symlink_target": ""
}
|
from PIL import Image
import glob
from os import path
import config
# read existing blacklist
if path.exists(config.path_blacklist):
    with open(config.path_blacklist, 'r') as f:
        blacklist = [line.strip() for line in f]
else:
blacklist = []
# try opening all images, save error-generating images to blacklist
for filename in glob.glob(path.join(config.path_img, '*')):  # all image files
try:
im=Image.open(filename)
except OSError:
basename = path.basename(filename)
print('Adding {} to blacklist'.format(basename))
blacklist.append(basename)
# write resulting blacklist to disk
with open(config.path_blacklist, 'w') as f:
    f.write('\n'.join(blacklist) + '\n')
|
{
"content_hash": "f0fb8637c3f7a3e6c62dd9bdb210bc22",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 73,
"avg_line_length": 31.666666666666668,
"alnum_prop": 0.6855263157894737,
"repo_name": "rtlee9/food-GAN",
"id": "369e9e006a6de4beb1437bda42924d66cb639141",
"size": "760",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/blacklist.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "22465"
},
{
"name": "Shell",
"bytes": "103"
}
],
"symlink_target": ""
}
|
import json
import yaml
import sys
import os
yamlpath = sys.argv[1]
jsonpath = os.path.splitext(yamlpath)[0] + '.json'
print(jsonpath)
with open(yamlpath, 'r') as yp:
# BaseLoader because our parser does not yet support
# constructors or resolvers
data = yaml.load(yp, Loader=yaml.BaseLoader)
with open(jsonpath, 'w') as jp:
json.dump(data, jp, sort_keys=True, indent=2)
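# Example (filename is illustrative):
#   python yaml2json.py fixtures/sample.yaml   # writes fixtures/sample.json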
|
{
"content_hash": "5406582d02b16935e39aef7ce3b94db7",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 56,
"avg_line_length": 21.77777777777778,
"alnum_prop": 0.7040816326530612,
"repo_name": "davidlazar/go-libyaml",
"id": "6ef5f10e82658b4151ca8d394631e969301db8ea",
"size": "454",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "yaml2json.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Go",
"bytes": "4308"
},
{
"name": "Python",
"bytes": "454"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
import logging
# Imports from core django
# Imports from third party apps
from rest_framework import mixins
from rest_framework.viewsets import GenericViewSet
from rest_framework_bulk.mixins import BulkCreateModelMixin
# Local imports
class BulkCreateViewSet(BulkCreateModelMixin,
mixins.RetrieveModelMixin,
mixins.UpdateModelMixin,
mixins.DestroyModelMixin,
mixins.ListModelMixin,
GenericViewSet):
pass
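# Usage sketch (model and serializer names are placeholders):
#
#   class MLModelViewSet(BulkCreateViewSet):
#       queryset = MLModel.objects.all()
#       serializer_class = MLModelSerializer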
|
{
"content_hash": "1e4c2783e681caea90e2ebbc1d298e36",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 59,
"avg_line_length": 30.05263157894737,
"alnum_prop": 0.660245183887916,
"repo_name": "ephes/ml_jobcontrol",
"id": "12fc77306f391fdce8f8cd8f8c98388066eb3fa4",
"size": "624",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ml_jobcontrol/ml_jobcontrol/viewsets.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "39"
},
{
"name": "JavaScript",
"bytes": "45"
},
{
"name": "Python",
"bytes": "132531"
},
{
"name": "Shell",
"bytes": "5120"
}
],
"symlink_target": ""
}
|
from django import forms
from django.utils.translation import gettext_lazy as _
class QueryForm(forms.Form):
query_string = forms.CharField(
label=_("Search term(s)/phrase"),
help_text=_(
"Enter the full search string to match. An "
"exact match is required for your Promoted Results to be "
"displayed, wildcards are NOT allowed."
),
required=True,
)
|
{
"content_hash": "1d612597b0da5c5f9059beeacb7bc1d9",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 70,
"avg_line_length": 30.714285714285715,
"alnum_prop": 0.6186046511627907,
"repo_name": "zerolab/wagtail",
"id": "95a72462cd61a3022f24925c3cd08dc9840bf42b",
"size": "430",
"binary": false,
"copies": "4",
"ref": "refs/heads/main",
"path": "wagtail/search/forms.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "2522"
},
{
"name": "Dockerfile",
"bytes": "2041"
},
{
"name": "HTML",
"bytes": "593037"
},
{
"name": "JavaScript",
"bytes": "615631"
},
{
"name": "Makefile",
"bytes": "1413"
},
{
"name": "Python",
"bytes": "6560334"
},
{
"name": "SCSS",
"bytes": "219204"
},
{
"name": "Shell",
"bytes": "6845"
},
{
"name": "TypeScript",
"bytes": "288102"
}
],
"symlink_target": ""
}
|
import zmq
# ZeroMQ Context
context = zmq.Context()
# Define the socket using the "Context"
sock = context.socket(zmq.REP)
sock.bind("tcp://127.0.0.1:5678")
# Run a simple "Echo" server
while True:
message = sock.recv()
sock.send("Echo: " + message)
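# A matching REQ client sketch (illustrative, not part of the original file):
#
#   import zmq
#   context = zmq.Context()
#   sock = context.socket(zmq.REQ)
#   sock.connect("tcp://127.0.0.1:5678")
#   sock.send("Hello")
#   print sock.recv()   # -> "Echo: Hello"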
|
{
"content_hash": "42a72c9660c98e82e89c18f08ad74bdc",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 39,
"avg_line_length": 20.076923076923077,
"alnum_prop": 0.6743295019157088,
"repo_name": "ajayaa/python-akhada",
"id": "79e4ea95ba33a2bf6da4dbfbe1b6c025e8a65561",
"size": "261",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "zeromq/server.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "1504"
},
{
"name": "Python",
"bytes": "12720"
}
],
"symlink_target": ""
}
|
import sys
_b = sys.version_info[0] < 3 and (lambda x: x) or (lambda x: x.encode("latin1"))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name="google/cloud/automl_v1beta1/proto/ranges.proto",
package="google.cloud.automl.v1beta1",
syntax="proto3",
serialized_options=_b(
"\n\037com.google.cloud.automl.v1beta1B\013RangesProtoP\001ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\312\002\033Google\\Cloud\\AutoMl\\V1beta1\352\002\036Google::Cloud::AutoML::V1beta1"
),
serialized_pb=_b(
'\n.google/cloud/automl_v1beta1/proto/ranges.proto\x12\x1bgoogle.cloud.automl.v1beta1\x1a\x1cgoogle/api/annotations.proto")\n\x0b\x44oubleRange\x12\r\n\x05start\x18\x01 \x01(\x01\x12\x0b\n\x03\x65nd\x18\x02 \x01(\x01\x42\xb2\x01\n\x1f\x63om.google.cloud.automl.v1beta1B\x0bRangesProtoP\x01ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\xca\x02\x1bGoogle\\Cloud\\AutoMl\\V1beta1\xea\x02\x1eGoogle::Cloud::AutoML::V1beta1b\x06proto3'
),
dependencies=[google_dot_api_dot_annotations__pb2.DESCRIPTOR],
)
_DOUBLERANGE = _descriptor.Descriptor(
name="DoubleRange",
full_name="google.cloud.automl.v1beta1.DoubleRange",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="start",
full_name="google.cloud.automl.v1beta1.DoubleRange.start",
index=0,
number=1,
type=1,
cpp_type=5,
label=1,
has_default_value=False,
default_value=float(0),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="end",
full_name="google.cloud.automl.v1beta1.DoubleRange.end",
index=1,
number=2,
type=1,
cpp_type=5,
label=1,
has_default_value=False,
default_value=float(0),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=109,
serialized_end=150,
)
DESCRIPTOR.message_types_by_name["DoubleRange"] = _DOUBLERANGE
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
DoubleRange = _reflection.GeneratedProtocolMessageType(
"DoubleRange",
(_message.Message,),
dict(
DESCRIPTOR=_DOUBLERANGE,
__module__="google.cloud.automl_v1beta1.proto.ranges_pb2",
__doc__="""A range between two double numbers.
Attributes:
start:
Start of the range, inclusive.
end:
End of the range, exclusive.
""",
# @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.DoubleRange)
),
)
_sym_db.RegisterMessage(DoubleRange)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
|
{
"content_hash": "35da364205cb87fefdf47ed078608eb9",
"timestamp": "",
"source": "github",
"line_count": 112,
"max_line_length": 460,
"avg_line_length": 32.919642857142854,
"alnum_prop": 0.6463249254136154,
"repo_name": "tswast/google-cloud-python",
"id": "6f2121baf69ed7cc52d3b15b34fe4c356b7cb126",
"size": "3828",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "automl/google/cloud/automl_v1beta1/proto/ranges_pb2.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "1094"
},
{
"name": "Python",
"bytes": "33785371"
},
{
"name": "Shell",
"bytes": "9148"
}
],
"symlink_target": ""
}
|
import argparse
from mapgen.generate_symbolset import (
generate_symbolset,
symbolsets,
update_file,
)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--update', '-u',
help='Update the symbol definition files',
action='store_true')
parser.add_argument('symbolset', help='Symbolset to generate',
choices=symbolsets + ('all',))
parser.add_argument('destination', help='Dataset destination folder')
args = parser.parse_args()
update_file('chartsymbols.xml', force=args.update)
if args.symbolset == 'all':
for symbolset in symbolsets:
generate_symbolset(symbolset, args.destination, args.update)
else:
generate_symbolset(args.symbolset, args.destination, args.update)
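# Example invocations (destination path is illustrative; valid symbolset
# names come from mapgen.generate_symbolset.symbolsets):
#   python generate_symbolset.py all /tmp/symbols
#   python generate_symbolset.py --update all /tmp/symbols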
|
{
"content_hash": "8d552187f817c4125fc352d770add63c",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 73,
"avg_line_length": 35.25,
"alnum_prop": 0.6276595744680851,
"repo_name": "LarsSchy/SMAC-M",
"id": "ddb7b997f26cec15fdd686b2697c74a5ce17ee4e",
"size": "870",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "chart-installation/generate_map_files/generate_symbolset.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "266268"
},
{
"name": "Shell",
"bytes": "39116"
}
],
"symlink_target": ""
}
|
from itertools import izip
import numpy as np
__author__ = "Gokul Ramesh"
__copyright__ = "Copyright 2014, LatentView Analytics Pvt. Ltd."
__email__ = "gokul.ramesh@latentview.com"
__status__ = "Development"
__version__="0.0.1"
def nNGrams(iterable, nList):
    """Return a list of n-gram iterators over `iterable`, one per n in nList.
    The special value [-1] wraps the whole iterable as a single gram."""
if nList == [-1]:
return [[iterable]]
return [izip(*[iterable[i:] for i in range(n)]) for n in nList]
def levenshtein(source, target):
    """Compute the Levenshtein edit distance between two sequences
    using a vectorized two-row dynamic-programming scheme."""
if len(source) < len(target):
return levenshtein(target, source)
if len(target) == 0:
return len(source)
source = np.array(tuple(source))
target = np.array(tuple(target))
previous_row = np.arange(target.size + 1)
for s in source:
current_row = previous_row + 1
current_row[1:] = np.minimum(
current_row[1:],
np.add(previous_row[:-1], target != s))
current_row[1:] = np.minimum(
current_row[1:],
current_row[0:-1] + 1)
previous_row = current_row
return previous_row[-1]
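# Comment-only examples of the helpers above:
#   levenshtein('kitten', 'sitting')  ->  3
#   nNGrams('abcd', [2])   ->  [izip of ('a','b'), ('b','c'), ('c','d')]
#   nNGrams('abcd', [-1])  ->  [['abcd']]  (whole iterable as one gram)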
|
{
"content_hash": "e96ea325c43071add517bd0f78c85440",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 67,
"avg_line_length": 25.29268292682927,
"alnum_prop": 0.5756991321118612,
"repo_name": "mrgokul/aggcluster",
"id": "8e3f179f6f5d77162cc528b4a9bb798bcc4a0f43",
"size": "1037",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cluster/util/text.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "260934"
},
{
"name": "Python",
"bytes": "29606"
}
],
"symlink_target": ""
}
|
import argparse
import sys
import logging
import elasticsearch
import elasticsearch.helpers
ES_NODES = 'uct2-es-door.mwt2.org'
VERSION = '0.1'
SOURCE_INDEX = '.kibana'
TARGET_INDEX = 'osg-connect-kibana'
def get_es_client():
""" Instantiate DB client and pass connection back """
return elasticsearch.Elasticsearch(hosts=ES_NODES,
retry_on_timeout=True,
max_retries=10,
timeout=300)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Reindex events from ' +
'.kibana ' +
'to osg-connect-kibana')
args = parser.parse_args(sys.argv[1:])
client = get_es_client()
results = elasticsearch.helpers.reindex(client,
SOURCE_INDEX,
TARGET_INDEX,
scroll='30m')
sys.stdout.write(str(results))
|
{
"content_hash": "367e6ae023108f9ef57d31100195a8df",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 73,
"avg_line_length": 32.878787878787875,
"alnum_prop": 0.4838709677419355,
"repo_name": "DHTC-Tools/logstash-confs",
"id": "8b0ede56aaa89acf36944e5eb4956523efdaf5f5",
"size": "1108",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "condor/python/resync_dashboards.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "90309"
},
{
"name": "R",
"bytes": "12118"
},
{
"name": "Shell",
"bytes": "13452"
}
],
"symlink_target": ""
}
|
import roslib
roslib.load_manifest('smart_arm_controller')
import rospy
from std_msgs.msg import Float64MultiArray
from copy import deepcopy
# Desired end-effector positions cycled through for the demo
desired_positions = [
[0.0285793, 0.0, 0.369884],
[0.0285793, 0.0, 0.469884],
[0.1285793, 0.0, 0.469884],
[0.1285793, 0.0, 0.369884]
]
if __name__ == '__main__':
pubs = rospy.Publisher('pos_for_IK', Float64MultiArray, queue_size=5)
rospy.init_node('position_for_IK', anonymous=True)
rate = rospy.Rate(0.2) #once every 5 secs
pos_idx = 0
while not rospy.is_shutdown():
pos_idx += 1
if pos_idx >= len(desired_positions):
pos_idx = 0
msg = Float64MultiArray()
msg.data = deepcopy(desired_positions[pos_idx])
pubs.publish(msg)
rate.sleep()
|
{
"content_hash": "d129b5851592557eb145e83bbd8a64b7",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 73,
"avg_line_length": 23.95,
"alnum_prop": 0.6440501043841336,
"repo_name": "sigproc/robotic_surgery",
"id": "b36d9bfd8220ac39cfc672979618ac8a30805fd7",
"size": "981",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/ros/kinematics_smart/arm_moveit_generated/src/pos_for_IK.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "95098"
},
{
"name": "CMake",
"bytes": "87588"
},
{
"name": "Makefile",
"bytes": "6460"
},
{
"name": "Python",
"bytes": "208268"
},
{
"name": "Shell",
"bytes": "1502"
}
],
"symlink_target": ""
}
|
""" Provides a way of sleeping safely.
"""
from time import sleep as systemsleep
from time import time
import select
from brisa.core import log
def tsleep(timeout, death_fd):
if death_fd:
# Interruptable sleep
read, dummy1, dummy2 = select.select([death_fd], [], [], timeout)
if read:
return False
else:
systemsleep(timeout)
return True
def safe_sleep(t, death_fd=None):
""" Implementation of a simple smart sleep function. This function makes
sure the whole sleep time is slept properly.
@param t: sleep time
@type t: integer
"""
if t < 1:
# We don't need this precision for sleeping. time.sleep() gives almost
# two decimal points of precision in sleeping
return tsleep(t, death_fd)
remaining = t
# For an interval of 1600 seconds, the rate will be 1600/10.0 = 160 s.
# For an interval of 10 seconds, the rate will be 10/10 = 1 second.
rate = t / 10.0
# Precision of 0.001. For example, if we have an interval of 1600 s,
# then the precision will be 1600/1000 = 1.6 seconds.
# For an interval of 10 seconds, the precision will be 10/1000 = 0.01 s.
precision = t / 1000.0
# If remaining is in the interval (0, precision), then leave
while remaining > precision:
# Measure the interval (t0, t1) and estimate the remaining
t0 = time()
if rate < remaining:
if not tsleep(rate, death_fd):
return False
else:
# If rate is bigger than the remaining time, then just sleep the
# remaining. We don't want to sleep more than the interval.
if remaining > 0:
if not tsleep(remaining, death_fd):
return False
else:
# If negative, already slept too much, so, set it to zero
remaining = 0
t1 = time()
remaining -= (t1-t0)
    return True
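# Usage sketch (the pipe below is an assumption, shown to illustrate the
# interruptable path):
#
#   import os
#   r, w = os.pipe()
#   safe_sleep(5)                # plain ~5 second sleep
#   safe_sleep(30, death_fd=r)   # returns False early once w is written to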
|
{
"content_hash": "aa4801a31dd3808ba9e1620bf11e961e",
"timestamp": "",
"source": "github",
"line_count": 68,
"max_line_length": 78,
"avg_line_length": 29.61764705882353,
"alnum_prop": 0.5988083416087389,
"repo_name": "aleixq/python3-brisa",
"id": "67b82e1c26bde9c3d8668440379b24da822ace8e",
"size": "2184",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "brisa/utils/safe_sleep.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "497333"
}
],
"symlink_target": ""
}
|
from pattern import *
Patterns = {}
def check_jid_for_matches(jid, Patterns = None):
if not Patterns:
return False
jid = JIDPattern.split(jid)
for pattern in Patterns:
if pattern == jid:
return pattern
return False
def check_nick_for_matches(nick, Patterns = None):
if not Patterns:
return False
for pattern in Patterns:
if pattern == nick:
return pattern
return False
def pattern_04eh(chat, nick, afl, role, status, text):
jid = handler_jid(u"%s/%s" % (chat, nick), "full_jid")
if nick != handler_botnick(chat):
matched_nick = check_nick_for_matches(nick, Patterns[chat]["nick"].keys())
if matched_nick:
pattern_action(chat, nick, jid, matched_nick, "nick")
else:
matched_jid = check_jid_for_matches(jid, Patterns[chat]["jid"].keys())
if matched_jid:
pattern_action(chat, nick, jid, matched_jid, "jid")
def pattern_action(chat, nick, jid, matched, key):
action = Patterns[chat][key][matched]
Reason = "%s: Sorry, but your %s matches pattern: %s" % (handler_botnick(chat), key, matched.normalize())
if action == "kick":
kick(chat, nick, Reason)
elif action == "ban":
outcast(chat, jid, Reason)
elif action == "visitor":
visitor(chat, nick, Reason)
elif action == "member":
member(chat, jid, Reason)
def pattern_command(mType, source, body):
room = source[1]
if body:
body = body.split() # 0: add/del; 1: jid/nick; 2: *@jabber.ru; 3: kick/ban/member (lol)
if len(body) > 2:
What = body.pop(1)
foo = {"jid": (JIDPattern, check_jid_for_matches), "nick": (NickPattern, check_nick_for_matches)}
if What in foo:
Type = (body.pop(0)).lower()
Pattern_raw = body.pop(0)
if Type == "add":
if body:
Action = (body.pop(0)).lower()
if Action in ("visitor", "kick", "ban", "member"):
try:
Pattern = foo[What][0](Pattern_raw)
except AssertionError, text:
answer = str(text)
else:
isPatternExists = foo[What][1](Pattern_raw, Patterns[room][What].keys())
if not isPatternExists:
Patterns[room][What][Pattern] = Action
write_file("dynamic/%s/%s" % (room, RegFile), str(Patterns[room]))
answer = "Added: %(What)s match «%(Pattern_raw)s» → %(Action)s." % vars()
else:
answer = "Pattern «%s» already exists." % Pattern_raw
else:
answer = "Unknown action: %s." % action
else:
answer = "no body, no game"
elif Type == "del":
try:
Pattern = foo[What][1](Pattern_raw, Patterns[room][What].keys())
except AssertionError, text:
answer = str(text)
else:
if Pattern:
del Patterns[room][What][Pattern]
write_file("dynamic/%s/%s" % (room, RegFile), str(Patterns[room]))
answer = "ok."
else:
answer = "fail!"
else:
answer = "Undefined type!"
else:
answer = "Unknown parameter!"
else:
answer = "Need more body!"
else:
if Patterns.get(room):
List = {"jid": [], "nick": []}
nickPatterns = Patterns[room]["nick"]
jidPatterns = Patterns[room]["jid"]
for jPattern in sorted(jidPatterns.keys()):
normal = jPattern.normalize()
List["jid"].append("%s → %s" % (normal, jidPatterns[jPattern]))
for nPattern in sorted(nickPatterns.keys()):
normal = nPattern.normalize()
List["nick"].append("%s → %s" % (normal, nickPatterns[nPattern]))
answer = ""
if List["jid"]:
answer += "\n• JIDPatterns:\n"
answer += enumerated_list(List["jid"])
if List["nick"]:
answer += "\n\n• NickPatterns:\n"
answer += enumerated_list(List["nick"])
if not answer:
answer = "List is Empty." # You Are Empty dude!
reply(mType, source, answer)
RegFile = "regexp.base"
Patterns = {}
def pattern_01si(chat):
patterns = {"nick": {}, "jid": {}}
if check_file(chat, RegFile, str(patterns)):
patterns = eval(read_file("dynamic/%s/%s" % (chat, RegFile)))
Patterns[chat] = patterns
handler_register("01si", pattern_01si)
handler_register("04eh", pattern_04eh)
command_handler(pattern_command, 20, "regexp")
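# Usage sketch (hypothetical values; NickPattern comes from the wildcard
# import above and, per the checks above, compares equal to any nick it
# matches -- the constructor signature here is assumed):
#   nick_pattern = NickPattern(u"spam*")
#   check_nick_for_matches(u"spammer42", [nick_pattern])  # -> nick_pattern
#   check_nick_for_matches(u"alice", [nick_pattern])      # -> False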
|
{
"content_hash": "f775ebbfcb3228547fc6968ccc912d87",
"timestamp": "",
"source": "github",
"line_count": 128,
"max_line_length": 106,
"avg_line_length": 31.421875,
"alnum_prop": 0.6210840377921432,
"repo_name": "Manazius/blacksmith-bot",
"id": "fc919b1a1c185848946d12cb5508209dfce1777d",
"size": "4151",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "extensions/regexp.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "13337"
},
{
"name": "HTML",
"bytes": "4311"
},
{
"name": "Python",
"bytes": "433322"
}
],
"symlink_target": ""
}
|
import argparse
from oslo_serialization import jsonutils
from osc_lib.command import command
from mistralclient.commands.v2 import base
from mistralclient import utils
class EnvironmentFormatter(base.MistralFormatter):
COLUMNS = [
('name', 'Name'),
('description', 'Description'),
('variables', 'Variables'),
('scope', 'Scope'),
('created_at', 'Created at'),
('updated_at', 'Updated at'),
]
LIST_COLUMN_FIELD_NAMES = [c[0] for c in COLUMNS if c[0] != 'variables']
LIST_COLUMN_HEADING_NAMES = [c[1] for c in COLUMNS if c[0] != 'variables']
@staticmethod
def format(environment=None, lister=False):
if lister:
columns = EnvironmentFormatter.LIST_COLUMN_HEADING_NAMES
else:
columns = EnvironmentFormatter.headings()
if environment:
data = (
environment.name,)
if hasattr(environment, 'description'):
data += (environment.description or '<none>',)
else:
data += (None,)
if not lister:
data += (jsonutils.dumps(environment.variables, indent=4),)
data += (
environment.scope,
environment.created_at,)
if hasattr(environment, 'updated_at'):
data += (environment.updated_at or '<none>',)
else:
data += (None,)
else:
data = (tuple('' for _ in range(len(columns))),)
return columns, data
class List(base.MistralLister):
"""List all environments."""
def _get_format_function(self):
return EnvironmentFormatter.format_list
def _get_resources(self, parsed_args):
mistral_client = self.app.client_manager.workflow_engine
return mistral_client.environments.list(
marker=parsed_args.marker,
limit=parsed_args.limit,
sort_keys=parsed_args.sort_keys,
sort_dirs=parsed_args.sort_dirs,
fields=EnvironmentFormatter.fields(),
**base.get_filters(parsed_args)
)
class Get(command.ShowOne):
"""Show specific environment."""
def get_parser(self, prog_name):
parser = super(Get, self).get_parser(prog_name)
parser.add_argument('environment', help='Environment name')
parser.add_argument(
'--export',
default=False,
action='store_true',
help='Export the environment suitable for import'
)
return parser
def take_action(self, parsed_args):
mistral_client = self.app.client_manager.workflow_engine
environment = mistral_client.environments.get(parsed_args.environment)
if parsed_args.export:
columns = ('name',
'description',
'scope',
'variables')
data = (environment.name,
environment.description,
environment.scope,
jsonutils.dumps(environment.variables))
return columns, data
return EnvironmentFormatter.format(environment)
class Create(command.ShowOne):
"""Create new environment."""
def get_parser(self, prog_name):
parser = super(Create, self).get_parser(prog_name)
parser.add_argument(
'file',
type=argparse.FileType('r'),
help='Environment configuration file in JSON or YAML'
)
return parser
def take_action(self, parsed_args):
data = utils.load_content(parsed_args.file.read())
mistral_client = self.app.client_manager.workflow_engine
environment = mistral_client.environments.create(**data)
return EnvironmentFormatter.format(environment)
class Delete(command.Command):
"""Delete environment."""
def get_parser(self, prog_name):
parser = super(Delete, self).get_parser(prog_name)
parser.add_argument(
'environment',
nargs='+',
help='Name of environment(s).'
)
return parser
def take_action(self, parsed_args):
mistral_client = self.app.client_manager.workflow_engine
utils.do_action_on_many(
lambda s: mistral_client.environments.delete(s),
parsed_args.environment,
"Request to delete environment %s has been accepted.",
"Unable to delete the specified environment(s)."
)
class Update(command.ShowOne):
"""Update environment."""
def get_parser(self, prog_name):
parser = super(Update, self).get_parser(prog_name)
parser.add_argument(
'file',
type=argparse.FileType('r'),
help='Environment configuration file in JSON or YAML'
)
return parser
def take_action(self, parsed_args):
data = utils.load_content(parsed_args.file.read())
mistral_client = self.app.client_manager.workflow_engine
environment = mistral_client.environments.update(**data)
return EnvironmentFormatter.format(environment)
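# Typical CLI flow for the commands above, as registered under the
# "mistral" entry point (file and environment names are illustrative):
#   mistral environment-create env.yaml
#   mistral environment-list
#   mistral environment-get my_env --export
#   mistral environment-delete my_env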
|
{
"content_hash": "dfb61b13f8ab82ee9f71f0aae3514e0f",
"timestamp": "",
"source": "github",
"line_count": 175,
"max_line_length": 78,
"avg_line_length": 29.514285714285716,
"alnum_prop": 0.5862536302032914,
"repo_name": "openstack/python-mistralclient",
"id": "cdcb4e9ba06684465b2e4d6228156cc48510f368",
"size": "5775",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mistralclient/commands/v2/environments.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "499835"
},
{
"name": "Shell",
"bytes": "4068"
}
],
"symlink_target": ""
}
|
import os
import io
from gensim import corpora, models, similarities
import re
from config import *
from nltk import sent_tokenize
from nltk import word_tokenize
__author__ = 'Memray'
import json
stopwords = []
def load_stopword(path):
stop_word = []
try:
stopword_file = open(path, 'r')
stop_word = [line.strip() for line in stopword_file]
except:
print('Error occurs when loading STOPWORD')
return stop_word
class Document:
def __init__(self, review, tip, user_id, business_id):
self.tip = tip
self.review = review
self.business_id = business_id
self.user_id = user_id
def to_json(self):
return json.dumps(self.__dict__)
@classmethod
def from_json(cls, json_str):
json_dict = json.loads(json_str)
return cls(**json_dict)
def generate_dict_corpus_all_review():
'''
generate the gensim dict&corpus on the whole review corpus
:return:
'''
print('Generating new dict and corpus on all Yelp reviews')
review_file = open(FULL_YELP_REVIEW_PATH, 'r')
# output_review = open("review.json", 'w')
# output_tip = open("tip.json", 'w')
texts = []
stoplist = load_stopword(STOPWORD_PATH)
count = 0
for line in review_file:
count += 1
if count % 10000 ==0:
print(count)
json_review = json.loads(line.strip())
        text = json_review.get("text").lower()  # json.loads already returns unicode text
# tokenize and clean. Split non-word&number: re.sub(r'\W+|\d+', '', word.decode('utf-8')). Keep all words:r'\d+'
tokens = [re.sub(r'\W+|\d+', '', word) for word in text.split()]
# remove stop words and short tokens
tokens = [token for token in tokens if ((not token.strip()=='') and (not token in stoplist))]
# stemming, experiment shows that stemming works nothing...
# if (stemming):
# stemmer = PorterStemmer()
# texts = [[ stemmer.stem(token) for token in text] for text in texts]
texts.append(tokens)
review_file.close()
# remove words that appear only once
# from collections import defaultdict
# frequency = defaultdict(int)
# for token in tokens:
# frequency[token] += 1
# for text in texts:
# tokens = []
# for token in text:
# if (frequency[token] > 1):
# tokens.append(token)
# text = tokens
# texts = [[token for token in text if (frequency[token] > 1)] for text in texts]
    print('Corpus preprocessing and counting completed!')
dictionary = corpora.Dictionary(texts)
dictionary.filter_extremes(no_below=5)
dictionary.save(DICT_PATH) # store the dictionary, for future reference
dictionary.save_as_text(DICT_TXT_PATH)
corpus = [dictionary.doc2bow(text) for text in texts]
corpora.MmCorpus.serialize(CORPUS_PATH, corpus) # store to disk, for later use
    print('Generating dict and corpus completed!')
def load_dict_corpus_all_review():
'''
return the gensim dict&corpus on the whole review corpus
:return: dict&corpus
'''
if not (os.path.isfile(DICT_PATH) and os.path.isfile(CORPUS_PATH)):
generate_dict_corpus_all_review()
print('Reading dict & corpus')
dict = corpora.Dictionary.load(DICT_PATH)
corpus = corpora.MmCorpus(CORPUS_PATH)
    print('Reading completed')
return corpus, dict
def generate_summary_data():
document_list = []
'''
load tips
'''
input_file = open(FULL_YELP_TIP_PATH, 'r')
tip_dict = {}
for line in input_file:
json_tip = json.loads(line.strip())
tup = (json_tip.get("user_id"), json_tip.get("business_id"))
if tup not in tip_dict:
tip_dict[tup] = json_tip.get("text").strip().lower()
else:
tip_dict[tup] = tip_dict[tup] + json_tip.get("text").strip().lower()
input_file.close()
print("length: ", len(tip_dict))
# tip_file = open("tip_file.txt", "w")
# for k, v in my_dict.items():
# if len(v) >= 2:
# tip_file.write(str(k) + str(v) + "\n")
# tip_file.close()
'''
load reviews
'''
review_file = open(FULL_YELP_REVIEW_PATH, 'r', encoding='utf-8')
# output_review = open("review.json", 'w')
# output_tip = open("tip.json", 'w')
count_with_tip = 0
count_found_tip = 0
output_string = ''
for line in review_file:
json_review = json.loads(line.strip())
tup = (json_review.get("user_id"), json_review.get("business_id"))
text = json_review.get("text").lower()
# check whether this review contains a tip, ignore if not
if tup in tip_dict:
count_with_tip += 1
tip = tip_dict[tup]
doc = Document(text, tip, json_review.get("user_id"), json_review.get("business_id"))
document_list.append(doc)
            if (text.find(tip) != -1) and (text != tip):
count_found_tip += 1
output_string += doc.to_json()+'\n'
review_file.close()
# output_review.close()
# output_tip.close()
print(count_with_tip)
print(count_found_tip)
output_file = open(DATA_PATH, 'w')
    output_file.write(output_string)
output_file.close()
def load_yelp_training_data():
'''
    Read the data and return the training data (summaries which contain a tip)
If file does not exist, generate and save to file in a Json format
:return:
'''
if not os.path.isfile(DATA_PATH):
print('Start to generate summary&tip data')
generate_summary_data()
print('Start to read summary&tip data')
doc_list = []
data_file = open(DATA_PATH, 'r')
for line in data_file:
doc = Document.from_json(line)
doc.tip = doc.tip.replace('\n','')
doc.review = doc.review.replace('\n','')
if len(doc.tip.strip())>20:
doc_list.append(doc)
return doc_list
def load_lda(corpus, dictionary):
'''
Load lda from file, or create/train a new one
:return:
'''
if not os.path.isfile(LDA_MODEL_PATH):
# Online LDA: extract 100 LDA topics, using 1 pass and updating once every 1 chunk (10,000 documents)
print('Creating LDA..')
lda = models.ldamodel.LdaModel(corpus=corpus, id2word=dictionary, num_topics=50, update_every=1, chunksize=10000, passes=1)
# extract 100 LDA topics, using 20 full passes, no online updates
#lda = gensim.models.ldamodel.LdaModel(corpus=mm, id2word=id2word, num_topics=100, update_every=0, passes=20)
lda.save(LDA_MODEL_PATH)
print('LDA finished..')
print('Loading LDA model')
lda = models.LdaModel.load(LDA_MODEL_PATH)
return lda
def load_tfidf(corpus, dictionary):
if not os.path.isfile(TFIDF_MODEL_PATH):
print('Creating TF-IDF')
tfidf = models.TfidfModel(corpus)
print('TF-IDF created')
tfidf.save(TFIDF_MODEL_PATH)
print('Loading TF-IDF model')
tfidf = models.TfidfModel.load(TFIDF_MODEL_PATH)
return tfidf
# doc_list = get_data()
# print(len(doc_list))
def get_review_sentences():
'''
    Read the yelp reviews and return them after sentence segmentation
:return:
'''
review_file = io.open(FULL_YELP_REVIEW_PATH, 'r', encoding='utf-8')
count_sentence = 0
sentences = []
for line in review_file:
json_review = json.loads(line.strip())
text = json_review.get("text").replace('\n','').lower()
raw_sentences = sent_tokenize(text)
for raw_sentence in raw_sentences:
if len(raw_sentence.strip()) > 0:
sent_tokens = word_tokenize(raw_sentence)
sentences.append(sent_tokens)
return sentences
def load_w2v(corpus, dictionary):
'''
Return the trained Word2Vec model
Train a model if model doesn't exist yet
:param corpus:
:param dictionary:
:return:
'''
if not os.path.isfile(W2V_MODEL_PATH):
num_features = 300 # Word vector dimensionality
min_word_count = 5 # Minimum word count
num_workers = 5 # Number of threads to run in parallel
window = 5 # Context window size
downsampling = 1e-5 # Downsample setting for frequent words
print("Training the word2vec model!")
sents = get_review_sentences()
# Initialize and train the model (this will take some time)
model = models.Word2Vec(sents, workers=num_workers, \
size=num_features, min_count = min_word_count, \
window = window, sample = downsampling)
# If you don't plan to train the model any further, calling
# init_sims will make the model much more memory-efficient.
model.init_sims(replace=True)
# It can be helpful to create a meaningful model name and
# save the model for later use. You can load it later using Word2Vec.load()
model.save(W2V_MODEL_PATH)
        print('Word2vec model created!')
print('Loading word2vec model')
w2v = models.Word2Vec.load(W2V_MODEL_PATH)
    print('Loading word2vec model completed!')
return w2v
def export_to_attention_model_data_format():
doc_list = load_yelp_training_data()
print(len(doc_list))
# summary_file = io.open(ATTENTION_MODEL_SUMMARY_PATH, 'w', encoding='utf-8')
# document_file = io.open(ATTENTION_MODEL_DOCUMENT_PATH, 'w', encoding='utf-8')
document_summary_file = io.open(ATTENTION_MODEL_DOCUMENT_SUMMARY_PATH, 'w', encoding='utf-8')
for doc in doc_list:
# summary_file.write(doc.tip+'\n')
# document_file.write(doc.review+'\n')
document_summary_file.write(doc.tip+'\t'+doc.review+'\n')
# summary_file.close()
# document_file.close()
document_summary_file.close()
export_to_attention_model_data_format()
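# Round-trip sketch for the Document JSON helpers above (illustrative
# values only):
#   doc = Document('great food', 'try the tacos', 'user_1', 'biz_1')
#   assert Document.from_json(doc.to_json()).tip == 'try the tacos'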
|
{
"content_hash": "0223047ee9b293c81bb3e2221be94f77",
"timestamp": "",
"source": "github",
"line_count": 303,
"max_line_length": 131,
"avg_line_length": 32.801980198019805,
"alnum_prop": 0.6131401549451655,
"repo_name": "fujunswufe/YelpDataChallenge",
"id": "e41f13dc262c2f9bd5dbba04aeb13764e4e5d77d",
"size": "9957",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/summarize/reader.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "4631"
},
{
"name": "R",
"bytes": "1210"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.contrib.auth.handlers.modwsgi import (
check_password, groups_for_user,
)
from django.contrib.auth.models import Group, User
from django.test import TransactionTestCase, override_settings
from .models import CustomUser
# This must be a TransactionTestCase because the WSGI auth handler performs
# its own transaction management.
class ModWsgiHandlerTestCase(TransactionTestCase):
"""
Tests for the mod_wsgi authentication handler
"""
available_apps = [
'django.contrib.auth',
'django.contrib.contenttypes',
'auth_tests',
]
def test_check_password(self):
"""
Verify that check_password returns the correct values as per
http://code.google.com/p/modwsgi/wiki/AccessControlMechanisms#Apache_Authentication_Provider
"""
User.objects.create_user('test', 'test@example.com', 'test')
# User not in database
self.assertIsNone(check_password({}, 'unknown', ''))
# Valid user with correct password
self.assertTrue(check_password({}, 'test', 'test'))
# correct password, but user is inactive
User.objects.filter(username='test').update(is_active=False)
self.assertFalse(check_password({}, 'test', 'test'))
# Valid user with incorrect password
self.assertFalse(check_password({}, 'test', 'incorrect'))
@override_settings(AUTH_USER_MODEL='auth_tests.CustomUser')
def test_check_password_custom_user(self):
"""
Verify that check_password returns the correct values as per
http://code.google.com/p/modwsgi/wiki/AccessControlMechanisms#Apache_Authentication_Provider
with custom user installed
"""
CustomUser._default_manager.create_user('test@example.com', '1990-01-01', 'test')
# User not in database
self.assertIsNone(check_password({}, 'unknown', ''))
        # Valid user with correct password
self.assertTrue(check_password({}, 'test@example.com', 'test'))
# Valid user with incorrect password
self.assertFalse(check_password({}, 'test@example.com', 'incorrect'))
def test_groups_for_user(self):
"""
Check that groups_for_user returns correct values as per
http://code.google.com/p/modwsgi/wiki/AccessControlMechanisms#Apache_Group_Authorisation
"""
user1 = User.objects.create_user('test', 'test@example.com', 'test')
User.objects.create_user('test1', 'test1@example.com', 'test1')
group = Group.objects.create(name='test_group')
user1.groups.add(group)
# User not in database
self.assertEqual(groups_for_user({}, 'unknown'), [])
self.assertEqual(groups_for_user({}, 'test'), [b'test_group'])
self.assertEqual(groups_for_user({}, 'test1'), [])
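# For reference, these handlers are normally wired into Apache/mod_wsgi
# with directives along these lines (paths illustrative), where auth.wsgi
# imports check_password from django.contrib.auth.handlers.modwsgi:
#   AuthType Basic
#   AuthName "example.com"
#   AuthBasicProvider wsgi
#   WSGIAuthUserScript /path/to/auth.wsgi
#   Require valid-user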
|
{
"content_hash": "5d259e162698dab80278d525f74b4c2c",
"timestamp": "",
"source": "github",
"line_count": 79,
"max_line_length": 100,
"avg_line_length": 36.29113924050633,
"alnum_prop": 0.65852807813045,
"repo_name": "Endika/django",
"id": "c0751a082542f7680ef74e7673a5b8c2d9c07e1f",
"size": "2867",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "tests/auth_tests/test_handlers.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "52334"
},
{
"name": "HTML",
"bytes": "170527"
},
{
"name": "JavaScript",
"bytes": "256023"
},
{
"name": "Makefile",
"bytes": "125"
},
{
"name": "Python",
"bytes": "11459348"
},
{
"name": "Shell",
"bytes": "809"
},
{
"name": "Smarty",
"bytes": "130"
}
],
"symlink_target": ""
}
|
__author__ = 'Dimitry Lvovsky, dimitry@reviewpro.com'
# Copyright 2014 ReviewRank S.A ( ReviewPro )
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import requests
import hashlib
import time
class Connect():
DEFAULT_URL = "http://connect.reviewpro.com"
DEFAULT_HTTPS_URL = "https://connect.reviewpro.com"
REVIEW_SUMMARIES_URL = "/v1/lodging/review/summaries"
CSQ_URL = "/v1/lodging/csq"
REVIEW_AVAILABLE_SRC_URL = "/v1/lodging/sources/available"
PUBLISHED_REVIEWS_SRC_URL = "/v1/lodging/review/published"
MANAGEMENT_RESPONSES_URL = "/v1/lodging/review/responses"
PIDS_FOR_ACCOUNT_URL = "/v1/account/lodgings"
DAILY_INDEX_URL = "/v1/lodging/index/daily"
LODGIN_DIST_URL = "/v1/lodging/review/rating/distribution"
REVENUE_INDEXES_URL = "/v1/lodging/revenueindexes"
BACKOFFICE_PRODUCT_URL = "/bo/v1/product"
BACKOFFICE_USER_URL = "/bo/v1/user"
def __init__(self, api_key, api_sec):
self.__api_key = api_key
self.__api_sec = api_sec
def fetchDailyIndexForRating(self, pid, fd, td, rt, max_error=3):
params = {"pid": pid, "fd": fd, "td": td, "rt": rt}
self.__add_api_key(params)
url = Connect.DEFAULT_URL + Connect.DAILY_INDEX_URL
error_count = 0
while error_count < max_error:
resp = requests.get(url, params=params)
if resp.status_code == requests.codes.ok:
return resp.json()
error_count += 1
print "endpoint %s responded with error code %s, error %s out of %s sleeping for %s sec" % (url, resp.status_code, error_count, max_error, error_count ** 2)
time.sleep(error_count ** 2)
def fetchDailyDistribution(self, pid, fd, td, rt, max_error=3):
params = {"pid": pid, "fd": fd, "td": td, "rt": rt, 'timeseries':True}
self.__add_api_key(params)
url = Connect.DEFAULT_URL + Connect.LODGIN_DIST_URL
error_count = 0
while error_count < max_error:
resp = requests.get(url, params=params)
if resp.status_code == requests.codes.ok:
return resp.json()
error_count += 1
print "endpoint %s responded with error code %s, error %s out of %s sleeping for %s sec" % (url, resp.status_code, error_count, max_error, error_count ** 2)
time.sleep(error_count ** 2)
def fetchReviewSummaries(self, pid, fd, td, rw=10, st=0, max_error=3):
params = {"pid": pid, "fd": fd, "td": td, "rw": rw, "st": st}
self.__add_api_key(params)
url = Connect.DEFAULT_URL + Connect.REVIEW_SUMMARIES_URL
error_count = 0
while error_count < max_error:
resp = requests.get(url, params=params)
if resp.status_code == requests.codes.ok:
return resp.json()
error_count += 1
print "endpoint %s responded with error code %s, error %s out of %s sleeping for %s sec" % (url, resp.status_code, error_count, max_error, error_count ** 2)
time.sleep(error_count ** 2)
def fetchAvailableSrc(self, pid, fd, td, max_error=3):
params = {"pid": pid, "fd": fd, "td": td}
self.__add_api_key(params)
url = Connect.DEFAULT_URL + Connect.REVIEW_AVAILABLE_SRC_URL
error_count = 0
while error_count < max_error:
resp = requests.get(url, params=params)
if resp.status_code == requests.codes.ok:
return resp.json()
error_count += 1
print "endpoint %s responded with error code %s, error %s out of %s, sleeping for %s sec" % (url, resp.status_code, error_count, max_error, error_count ** 2)
time.sleep(error_count ** 2)
def fetchPublishedReviews(self, pid, max_error=3):
params = {"pid": pid}
self.__add_api_key(params)
url = Connect.DEFAULT_URL + Connect.PUBLISHED_REVIEWS_SRC_URL
error_count = 0
while error_count < max_error:
resp = requests.get(url, params=params)
if resp.status_code == requests.codes.ok:
return resp.json()
error_count += 1
print "endpoint %s responded with error code %s, error %s out of %s, sleeping for %s sec" % (url, resp.status_code, error_count, max_error, error_count ** 2)
time.sleep(error_count ** 2)
def fetchPidsForAccount(self, max_error=3):
params = {'aid':'143'}
self.__add_api_key(params)
url = Connect.DEFAULT_URL + Connect.PIDS_FOR_ACCOUNT_URL
error_count = 0
while error_count < max_error:
resp = requests.get(url, params=params)
if resp.status_code == requests.codes.ok:
return resp.json()
error_count += 1
print "endpoint %s responded with error code %s, error %s out of %s, sleeping for %s sec" % (url, resp.status_code, error_count, max_error, error_count ** 2)
time.sleep(error_count ** 2)
def fetchManagementResponses(self, pid, fd, td, max_error=3):
params = {"pid": pid, "fd": fd, "td": td}
self.__add_api_key(params)
url = Connect.DEFAULT_URL + Connect.MANAGEMENT_RESPONSES_URL
print url
error_count = 0
while error_count < max_error:
resp = requests.get(url, params=params)
if resp.status_code == requests.codes.ok:
return resp.json()
error_count += 1
print "endpoint %s responded with error code %s, error %s out of %s, sleeping for %s sec" % (url, resp.status_code, error_count, max_error, error_count ** 2)
time.sleep(error_count ** 2)
def pushCSQ(self, data, pid, max_error=3):
url = Connect.DEFAULT_URL + Connect.CSQ_URL
print url
params = {"pid": pid}
self.__add_api_key(params)
self.__add_signature(params)
headers = {'content-type': 'application/json'}
return requests.post(url, data=data, headers=headers, params=params)
def pushCSQXML(self, data, pid, max_error=3):
url = Connect.DEFAULT_URL + Connect.CSQ_URL
print url
params = {"pid": pid}
self.__add_api_key(params)
self.__add_signature(params)
headers = {'content-type': 'application/xml'}
return requests.post(url, data=data, headers=headers, params=params)
def postRevenueIndexes(self, data):
url = Connect.DEFAULT_HTTPS_URL + Connect.REVENUE_INDEXES_URL
params = {}
self.__add_api_key(params)
self.__add_signature(params)
headers = {'content-type': 'application/json'}
return requests.post(url, data=data, headers=headers, params=params)
def postBackofficeProduct(self, data):
url = Connect.DEFAULT_HTTPS_URL + Connect.BACKOFFICE_PRODUCT_URL
params = {}
self.__add_api_key(params)
self.__add_signature(params)
headers = {'content-type': 'application/json'}
return requests.post(url, data=data, headers=headers, params=params)
def putBackofficeProduct(self, ppid, data):
url = Connect.DEFAULT_HTTPS_URL + Connect.BACKOFFICE_PRODUCT_URL + "/" + ppid
params = {}
self.__add_api_key(params)
self.__add_signature(params)
headers = {'content-type': 'application/json'}
return requests.put(url, data=data, headers=headers, params=params)
def getBackofficeProduct(self, ppid, dpid):
url = Connect.DEFAULT_HTTPS_URL + Connect.BACKOFFICE_PRODUCT_URL + "/" + ppid
params = {"sourceType": dpid}
self.__add_api_key(params)
self.__add_signature(params)
headers = {'content-type': 'application/json'}
return requests.get(url, headers=headers, params=params)
def listBackofficeProduct(self, dpid):
url = Connect.DEFAULT_HTTPS_URL + Connect.BACKOFFICE_PRODUCT_URL + "s"
params = {"sourceType": dpid}
self.__add_api_key(params)
self.__add_signature(params)
headers = {'content-type': 'application/json'}
return requests.get(url, headers=headers, params=params)
def deleteBackofficeProduct(self, ppid, dpid):
url = Connect.DEFAULT_HTTPS_URL + Connect.BACKOFFICE_PRODUCT_URL + "/" + ppid
params = {"sourceType": dpid}
self.__add_api_key(params)
self.__add_signature(params)
headers = {'content-type': 'application/json'}
return requests.delete(url, headers=headers, params=params)
def postBackofficeUser(self, data):
url = Connect.DEFAULT_HTTPS_URL + Connect.BACKOFFICE_USER_URL
params = {}
self.__add_api_key(params)
self.__add_signature(params)
headers = {'content-type': 'application/json'}
return requests.post(url, data=data, headers=headers, params=params)
def putBackofficeUser(self, username, data):
url = Connect.DEFAULT_HTTPS_URL + Connect.BACKOFFICE_USER_URL + "/" + username
params = {}
self.__add_api_key(params)
self.__add_signature(params)
headers = {'content-type': 'application/json'}
return requests.put(url, data=data, headers=headers, params=params)
def getBackofficeUser(self, username, dpid):
url = Connect.DEFAULT_HTTPS_URL + Connect.BACKOFFICE_USER_URL + "/" + username
params = {"sourceType": dpid}
self.__add_api_key(params)
self.__add_signature(params)
headers = {'content-type': 'application/json'}
return requests.get(url, headers=headers, params=params)
def listBackofficeUser(self):
url = Connect.DEFAULT_HTTPS_URL + Connect.BACKOFFICE_USER_URL + "s"
params = {}
self.__add_api_key(params)
self.__add_signature(params)
headers = {'content-type': 'application/json'}
return requests.get(url, headers=headers, params=params)
def deleteBackofficeUser(self, username):
url = Connect.DEFAULT_HTTPS_URL + Connect.BACKOFFICE_USER_URL + "/" + username
params = {}
self.__add_api_key(params)
self.__add_signature(params)
headers = {'content-type': 'application/json'}
return requests.delete(url, headers=headers, params=params)
# private methods
def __add_signature(self, params):
m = hashlib.sha256()
m.update(str.encode(self.__api_key + self.__api_sec + repr(int(time.time()))))
params['sig'] = m.hexdigest()
def __add_api_key(self, params):
params['api_key'] = self.__api_key
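if __name__ == '__main__':
    # Minimal usage sketch (placeholder credentials, not part of the
    # original library): list the lodgings visible to the account.
    connect = Connect("my-api-key", "my-api-secret")
    print connect.fetchPidsForAccount()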
|
{
"content_hash": "969194d81ff6e733d0d8755f75a6ee2b",
"timestamp": "",
"source": "github",
"line_count": 244,
"max_line_length": 169,
"avg_line_length": 45.24180327868852,
"alnum_prop": 0.6139143038318688,
"repo_name": "reviewpro/api_connect",
"id": "acd2999846001d89cdf96a956fd5f7b1da2a65ba",
"size": "11039",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "connect_py/Connect.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "PHP",
"bytes": "7072"
},
{
"name": "Python",
"bytes": "11690"
}
],
"symlink_target": ""
}
|
from django_jinja.library import filter
__author__ = 'AlexStarov'
#register = Library()
#@register.filter(name='true_false', )
@filter(name='true_false', )
def true_false(value, ):
# if bool(value):
if value:
        return u'Да'  # "Yes"
    elif not value:
        return u'Нет'  # "No"
    else:
        # "Something else" (unreachable: value is either truthy or falsy)
        return u'Что-то ещё'
# else:
# return str(value)
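# Template usage sketch (django-jinja; "order.is_paid" is illustrative):
#   {{ order.is_paid|true_false }}   ->   "Да" / "Нет" ("Yes" / "No")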
|
{
"content_hash": "3ba73c8d9f48b649a3ec6fb8f282e4d8",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 39,
"avg_line_length": 20.210526315789473,
"alnum_prop": 0.5572916666666666,
"repo_name": "AlexStarov/Shop",
"id": "560b9dbed98b78f4b1ec03ebfef0efea3bf92ffc",
"size": "463",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "applications/utils/templatetags/true_false.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "268281"
},
{
"name": "HTML",
"bytes": "138853"
},
{
"name": "JavaScript",
"bytes": "10629133"
},
{
"name": "PHP",
"bytes": "14"
},
{
"name": "Python",
"bytes": "1532862"
},
{
"name": "Shell",
"bytes": "2089"
}
],
"symlink_target": ""
}
|
from webserver import application
if __name__ == '__main__':
application.run()
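# Deployment sketch (server choice assumed, not part of this repo):
#   gunicorn wsgi:application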
|
{
"content_hash": "6f0b0d969a1bd3cdb9e41507b87e906c",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 33,
"avg_line_length": 26.666666666666668,
"alnum_prop": 0.675,
"repo_name": "msemple1111/average_alpr",
"id": "24a52e28eb38dc956fa3cd2f0e723474774dfb0b",
"size": "80",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "wsgi.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "30023"
},
{
"name": "Shell",
"bytes": "185"
}
],
"symlink_target": ""
}
|
from kivy.app import App
from kivy.uix.widget import Widget
from kivy.uix.label import Label
from kivy.properties import ObjectProperty, NumericProperty
from kivy.clock import Clock
from kivy.core.window import Window
from kivy.uix.screenmanager import ScreenManager, Screen
import traceback
from random import randint
import random
import time
class MenuScreen(Screen):
def update(self, dt):
pass
def load_game(self):
#app.sm.current = 'snake'
name = str(time.time())
s = GameScreen(name=name)
app.sm.add_widget(s)
app.sm.current = name
class GameScreen(Screen):
def update(self,dt):
pass
def pre(self):
print("pre-enter")
    def start(self):
        print("starting...")
        # The original try/except referenced an undefined local name and so
        # always fell into the except branch; create the game explicitly on
        # every (re)start instead.
        self.game = SnakeGame()
        game = self.game
        game.setup()
        self.game_update = Clock.schedule_interval(game.update, 1.0/60.0)
        self.add_widget(game)
def end(self):
print("I've exited")
Clock.unschedule(self.game_update)
class SnakeGame(Widget):
apple = ObjectProperty(None)
snake = ObjectProperty(None)
segs = []
apple_count = NumericProperty(0)
def setup(self):
## self.snake = Snake(parent=self)
## self.apple = Apple(parent=self)
        self.segs = []  # reset per game; the bare local "segs" had no effect
width = Window.width / 10
height = Window.height / 10
self.apple.newSpot(width,height)
self._keyboard = Window.request_keyboard(self.keyboard_closed, self)
self._keyboard.bind(on_key_down=self.on_keyboard_down)
self.snake.pos = ((height/2)*10,(width/2)*10)
def update(self, dt):
width = int(Window.width / 10)
height = int(Window.height / 10)
self.snake.update()
if self.snake.eat_apple(self.apple):
self.apple.newSpot(width,height)
self.apple_count += 1
def on_touch_down(self, touch):
if self.snake.direction == 0 or self.snake.direction == 2:
if touch.x > self.snake.x:
self.snake.direction = 1
else:
self.snake.direction = 3
else:
if touch.y > self.snake.y:
self.snake.direction = 0
else:
self.snake.direction = 2
self.snake.move()
def keyboard_closed(self):
self._keyboard.unbind(on_key_down=self.on_keyboard_down)
self._keyboard = None
def on_keyboard_down(self, keyboard, keycode, text, modifiers):
if keycode[1] == 'up':
self.snake.direction = 0
if keycode[1] == 'right':
self.snake.direction = 1
if keycode[1] == 'down':
self.snake.direction = 2
if keycode[1] == 'left':
self.snake.direction = 3
self.snake.move()
def add_segment(self,segment):
self.add_widget(segment)
self.segs.append(segment)
for s in self.segs:
if s.life <= 0:
self.remove_widget(s)
self.segs.pop(self.segs.index(s))
else:
s.life -= 1
if s != segment:
if self.snake.bite(s):
self.dead()
def dead(self):
#App.get_running_app().stop()
for line in traceback.format_stack(): print(line.strip())
app.sm.current = 'menu'
class Snake(Widget):
direction = 0
speed = 30
advance = 0
length = 5
def update(self):
if self.advance == 0:
self.move()
self.advance -= 1
def move(self):
if self.direction == 0:
self.pos = self.x,self.y+10
elif self.direction == 1:
self.pos = self.x+10,self.y
elif self.direction == 2:
self.pos = self.x,self.y-10
else:
self.pos = self.x-10,self.y
if self.x > Window.width or self.y > Window.height:
self.parent.dead()
if self.x < 0 or self.y < 0:
self.parent.dead()
ss = SnakeSegment()
ss.pos = self.pos
ss.life = self.length
self.parent.add_segment(ss)
self.advance = self.speed
def eat_apple(self,apple):
if apple.x == self.x and apple.y == self.y:
self.length += 3
self.speed -= 1
return True
return False
def bite(self,seg):
if seg.x == self.x and seg.y == self.y:
return True
return False
class SnakeSegment(Widget):
life = 5
class Apple(Widget):
def newSpot(self,x,y):
self.pos = (randint(1,x-1)*10, randint(1,y-1)*10)
class SnakeApp(App):
sm = ScreenManager()
def build(self):
menu = MenuScreen(name='menu')
self.sm.add_widget(menu)
game = GameScreen(name='snake')
self.sm.add_widget(game)
self.sm.load_game = menu.load_game
Clock.schedule_interval(menu.update, 1.0/60.0)
return self.sm
app = None
if __name__ == '__main__':
app = SnakeApp()
app.run()
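# The ObjectProperty fields on SnakeGame are expected to be bound in an
# accompanying snake.kv file; a minimal sketch (assumed, not shown in this
# module) would look like:
#   <SnakeGame>:
#       snake: _snake
#       apple: _apple
#       Snake:
#           id: _snake
#       Apple:
#           id: _apple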
|
{
"content_hash": "c5e8c73df9b95e579a317bd36a99b97a",
"timestamp": "",
"source": "github",
"line_count": 193,
"max_line_length": 70,
"avg_line_length": 21.89637305699482,
"alnum_prop": 0.6635115948887838,
"repo_name": "Narcolapser/PyGameLearningByDoing",
"id": "6fbbb2e569c8b4d37a074321a7796679ba1d0a8c",
"size": "4226",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Snake/main.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "15910"
},
{
"name": "Ruby",
"bytes": "13166"
}
],
"symlink_target": ""
}
|
"""Contains the base Layer class, from which all layers inherit.
This is a private class and its internal implementation is subject to changes
in the future.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import inspect
import re
from six.moves import xrange # pylint: disable=redefined-builtin
import numpy as np
import six
from tensorflow.python.framework import ops
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import variables as tf_variables
from tensorflow.python.ops import variable_scope as vs
class _Layer(object):
"""Base layer class.
WARNING: Do not subclass this layer unless you know what you are doing:
the API is subject to future changes.
This is the class from which all layers inherit, implementing common
infrastructure functionality.
A layer is a class implementing common neural networks operations, such
as convolution, batch norm, etc. These operations require managing variables,
losses, and updates, as well as applying TensorFlow ops to input tensors.
Properties:
trainable: Whether the layer should be trained (boolean).
name: The name of the layer (string).
dtype: Default dtype of the layer (dtypes.float32).
trainable_variables: List of trainable variables.
non_trainable_variables: List of non-trainable variables.
variables: List of all variables of this layer, trainable and non-trainable.
updates: List of update ops of this layer.
losses: List of losses added by this layer.
"""
def __init__(self, trainable=True, name=None,
dtype=dtypes.float32, **kwargs):
# We use a kwargs dict here because these kwargs only exist
# for compatibility reasons.
# The list of kwargs is subject to changes in the future.
# We do not want to commit to it or to expose the list to users at all.
# Note this is exactly as safe as defining kwargs in the function signature,
# the only difference being that the list of valid kwargs is defined
    # below rather than in the signature, and default values are defined
# in calls to kwargs.get().
allowed_kwargs = {
'_scope',
'_reuse',
}
for kwarg in kwargs:
if kwarg not in allowed_kwargs:
raise TypeError('Keyword argument not understood:', kwarg)
self._trainable = trainable
self._built = False
self._trainable_variables = []
self._non_trainable_variables = []
self._updates = []
self._losses = []
self._reuse = kwargs.get('_reuse')
self.dtype = dtype
# Determine base name (non-unique).
base_name = name
if not name:
base_name = _to_snake_case(self.__class__.__name__)
# Determine variable scope.
scope = kwargs.get('_scope')
if scope:
self._scope = next(vs.variable_scope(scope).gen)
else:
self._scope = next(vs.variable_scope(None, default_name=base_name).gen)
# Unique name is borrowed from scope to match variable names.
self.name = self._scope.name
def __setattr__(self, name, value):
if hasattr(self, name):
# Only allow private attributes to be set more than once, under the
# convention that private attributes should only be set from inside
# the class.
# All attributes meant to be set several times should be set to private.
if name[0] != '_':
raise AttributeError('Read-only property cannot be set: %s' % name)
super(_Layer, self).__setattr__(name, value)
@property
def trainable_variables(self):
return self._trainable_variables if self.trainable else []
@property
def non_trainable_variables(self):
return self._non_trainable_variables if self.trainable else self.variables
@property
def variables(self):
"""Returns the list of all layer variables/weights.
Returns:
A list of variables.
"""
return self._trainable_variables + self._non_trainable_variables
@property
def updates(self):
return self._updates
@property
def losses(self):
return self._losses
@property
def built(self):
return self._built
@property
def trainable(self):
return self._trainable
@property
def weights(self):
"""Returns the list of all layer variables/weights.
Returns:
A list of variables.
"""
return self.variables
def build(self, _):
"""Creates the variables of the layer.
"""
self._built = True
def call(self, inputs, **kwargs):
"""The logic of the layer lives here.
Arguments:
inputs: input tensor(s).
**kwargs: additional keyword arguments.
Returns:
Output tensor(s).
"""
raise NotImplementedError
def _add_variable(self, name, shape, dtype=None,
initializer=None, regularizer=None, trainable=True,
variable_getter=vs.get_variable):
"""Adds a new variable to the layer.
Arguments:
name: variable name.
shape: variable shape.
dtype: The type of the variable. Defaults to `self.dtype`.
initializer: initializer instance (callable).
regularizer: regularizer instance (callable).
trainable: whether the variable should be part of the layer's
"trainable_variables" (e.g. variables, biases)
or "non_trainable_variables" (e.g. BatchNorm mean, stddev).
variable_getter: The getter to use for TensorFlow variables.
Returns:
The created variable.
"""
if dtype is None:
dtype = self.dtype
existing_variables = set(tf_variables.global_variables())
variable = variable_getter(name,
shape=shape,
initializer=initializer,
dtype=dtype,
trainable=trainable and self.trainable)
# TODO(sguada) fix name = variable.op.name
if variable in existing_variables:
return variable
if regularizer:
# To match the behavior of tf.get_variable(), we only
# apply regularization if the variable is newly created.
if isinstance(variable, tf_variables.PartitionedVariable):
for v in variable:
with ops.colocate_with(v.op):
with ops.name_scope(name + '/Regularizer'):
regularization = regularizer(v)
if regularization is not None:
self._losses.append(regularization)
_add_elements_to_collection(
regularization, ops.GraphKeys.REGULARIZATION_LOSSES)
else:
with ops.colocate_with(variable.op):
with ops.name_scope(name + '/Regularizer'):
regularization = regularizer(variable)
if regularization is not None:
self._losses.append(regularization)
_add_elements_to_collection(
regularization, ops.GraphKeys.REGULARIZATION_LOSSES)
if trainable:
self._trainable_variables.append(variable)
else:
self._non_trainable_variables.append(variable)
return variable
def __call__(self, inputs, **kwargs):
"""Wraps `call`, applying pre- and post-processing steps.
Arguments:
inputs: input tensor(s).
**kwargs: additional keyword arguments to be passed to `self.call`.
Returns:
Output tensor(s).
"""
# Define a custom getter to override tf.get_variable when creating layer
# variables. We respect current custom getter, if one is set.
current_custom_getter = vs.get_variable_scope().custom_getter
def variable_getter(getter, name, shape, dtype=None, initializer=None,
regularizer=None, trainable=True, **kwargs):
if current_custom_getter is not None:
getter = functools.partial(current_custom_getter, getter)
return self._add_variable(
name, shape, initializer=initializer, regularizer=regularizer,
dtype=dtype, trainable=trainable,
variable_getter=functools.partial(getter, **kwargs))
# Build (if necessary) and call the layer, inside a variable scope.
with vs.variable_scope(self._scope,
reuse=True if self._built else self._reuse,
custom_getter=variable_getter) as scope:
with ops.name_scope(scope.original_name_scope):
if not self.built:
input_list = _to_list(inputs)
input_shapes = [x.get_shape() for x in input_list]
if len(input_shapes) == 1:
self.build(input_shapes[0])
else:
self.build(input_shapes)
self._built = True
outputs = self.call(inputs, **kwargs)
# Apply activity regularization.
# Note that it should be applied every time the layer creates a new
# output, since it is output-specific.
if hasattr(self, 'activity_regularizer') and self.activity_regularizer:
output_list = _to_list(outputs)
for output in output_list:
with ops.name_scope('ActivityRegularizer'):
activity_regularization = self.activity_regularizer(output)
self._losses.append(activity_regularization)
_add_elements_to_collection(
activity_regularization, ops.GraphKeys.REGULARIZATION_LOSSES)
# Update global default collections.
_add_elements_to_collection(self.updates, ops.GraphKeys.UPDATE_OPS)
return outputs
def apply(self, inputs, **kwargs):
"""Apply the layer on a input.
This simply wraps `self.__call__`.
Arguments:
inputs: Input tensor(s).
**kwargs: additional keyword arguments to be passed to `self.call`.
Returns:
Output tensor(s).
"""
return self.__call__(inputs, **kwargs)
def _to_snake_case(name):
intermediate = re.sub('(.)([A-Z][a-z0-9]+)', r'\1_\2', name)
insecure = re.sub('([a-z])([A-Z])', r'\1_\2', intermediate).lower()
# If the class is private the name starts with "_" which is not secure
# for creating scopes. We prefix the name with "private" in this case.
if insecure[0] != '_':
return insecure
return 'private' + insecure
def _to_list(x):
"""This normalizes a list/tuple or single element into a list.
If a single element is passed, we return
a list of size 1 containing the element.
Arguments:
x: list or tuple or single element.
Returns:
A list.
"""
if isinstance(x, (list, tuple)):
return list(x)
return [x]
def _add_elements_to_collection(elements, collections):
elements = _to_list(elements)
collections = _to_list(collections)
for name in collections:
collection = ops.get_collection_ref(name)
collection_set = set(collection)
for element in elements:
if element not in collection_set:
collection.append(element)
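# Illustrative sketch (not part of the original module): a minimal layer
# that multiplies its input by one trainable scalar, showing the
# build()/_add_variable()/call() contract documented above.
class _ScaleExample(_Layer):
  """Example layer: scales its input by a single trainable variable."""
  def build(self, input_shape):
    # Create the variable through the layer machinery so it is tracked in
    # trainable_variables and picked up by the custom getter in __call__.
    self._scale = self._add_variable('scale', shape=[])
    super(_ScaleExample, self).build(input_shape)
  def call(self, inputs, **kwargs):
    return inputs * self._scale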
|
{
"content_hash": "25986053b0de00eb9f180de374447b8c",
"timestamp": "",
"source": "github",
"line_count": 317,
"max_line_length": 80,
"avg_line_length": 33.94637223974763,
"alnum_prop": 0.6581172753461574,
"repo_name": "calebfoss/tensorflow",
"id": "74a6052ff6b33602befa3405b3e21438ada2376a",
"size": "11501",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "tensorflow/python/layers/base.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "6963"
},
{
"name": "C",
"bytes": "128013"
},
{
"name": "C++",
"bytes": "20145558"
},
{
"name": "CMake",
"bytes": "112204"
},
{
"name": "CSS",
"bytes": "774"
},
{
"name": "Go",
"bytes": "96872"
},
{
"name": "HTML",
"bytes": "534896"
},
{
"name": "Java",
"bytes": "215285"
},
{
"name": "JavaScript",
"bytes": "13406"
},
{
"name": "Jupyter Notebook",
"bytes": "3034804"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "29661"
},
{
"name": "Objective-C",
"bytes": "7056"
},
{
"name": "Objective-C++",
"bytes": "64592"
},
{
"name": "Protocol Buffer",
"bytes": "183901"
},
{
"name": "Python",
"bytes": "16293967"
},
{
"name": "Shell",
"bytes": "314152"
},
{
"name": "TypeScript",
"bytes": "761620"
}
],
"symlink_target": ""
}
|
import re
import socket
import contextlib
import sys
import random
import argparse
import logging
from collections import namedtuple
from functools import partial
import dns.resolver
import dns.update
import dns.exception
import dns.tsigkeyring
import dns.query
PY2 = sys.version_info[0] == 2
PY3 = sys.version_info[0] == 3
if PY2:
from urllib2 import urlopen, URLError
else:
from urllib.request import urlopen
from urllib.error import URLError
MAX_RESPONSE_DATA = 8192
# TODO: Support IPv6 too
SIMPLE_IPV4_RE = re.compile("(?:\d{1,3}\.){3}\d{1,3}")
logger = logging.getLogger(__name__)
def ip_from_dyndns(data):
for m in SIMPLE_IPV4_RE.finditer(data):
return m.group()
# TODO: support dig +short myip.opendns.com @resolver1.opendns.com
HTTPS_IP_SERVICES = (
('https:icanhazip', 'https://icanhazip.com/'),
)
HTTP_IP_SERVICES = (
('http:dyndns', 'http://checkip.dyndns.com/', ip_from_dyndns),
('http:icanhazip', 'http://icanhazip.com/'),
('http:curlmyip', 'http://curlmyip.com/'),
('http:ifconfigme', 'http://ifconfig.me/ip'),
('http:ip.appspot.com', 'http://ip.appspot.com'),
('http:ipinfo', 'http://ipinfo.io/ip'),
('http:externalip', 'http://api.externalip.net/ip'),
('http:trackip', 'http://www.trackip.net/ip'),
('http:ipz', 'http://ipz.herokuapp.com/')
)
ALL_IP_SERVICES = HTTP_IP_SERVICES + HTTPS_IP_SERVICES
def validate_ipv4(ip_text):
try:
socket.inet_aton(ip_text)
except socket.error:
return False
else:
return True
def validate_ipv6(ip_text):
try:
socket.inet_pton(socket.AF_INET6, ip_text)
except socket.error:
return False
else:
return True
def validate_ipv46(ip_text):
return validate_ipv4(ip_text) or validate_ipv6(ip_text)
def simple_ip_fetch(url, extract_fun=lambda x: x.strip(), timeout=5):
logger.debug('fetching url "{0}"'.format(url))
try:
with contextlib.closing(urlopen(url, timeout=timeout)) as resp:
# Limit response size
data = resp.read(MAX_RESPONSE_DATA)
except (URLError, socket.error) as e:
logger.warn('couldn\'t fetch url "{0}" with timeout {1:.4g}'
.format(url, timeout))
return None
else:
ip = extract_fun(data)
if ip and validate_ipv4(ip):
return ip
class IpFetchError(Exception):
pass
class SimpleIpGetter(object):
DEFAULT_TIMEOUT = 5
DEFAULT_TRIES = 3
ALL_SERVICES = ALL_IP_SERVICES
def __init__(self, services):
self.services = {}
for service in services:
name = service[0]
self.services[name] = service[1:]
if not self.services:
raise ValueError("At least one service should exist")
self.service_names = tuple(self.services.keys())
def query_service(self, service_name, timeout=DEFAULT_TIMEOUT):
logger.info('query service "{0}"'.format(service_name))
if service_name not in self.services:
raise ValueError("Bad service_name '{0}'".format(service_name))
args = self.services[service_name]
ip = simple_ip_fetch(*args, timeout=timeout)
logger.debug('service "{0}" return such ip "{1}"'
.format(service_name, ip))
return ip
def iter_rand_service(self, num):
l = len(self.service_names)
for i in range(num):
if i % l == 0:
el = random.randrange(l)
yield self.service_names[el]
next_array = list(self.service_names)
del next_array[el]
k = l
else:
k -= 1
el = random.randrange(k)
yield next_array[el]
del next_array[el]
def get(self, tries=DEFAULT_TRIES, timeout=DEFAULT_TIMEOUT):
logger.debug('try determine ip address')
for service in self.iter_rand_service(tries):
res = self.query_service(service, timeout=timeout)
if res is not None:
return res
logger.debug('can\'t determine ip address')
raise IpFetchError("Can't fetch ip address")
@staticmethod
def get_service_info(servicename):
return servicename.split(':', 1)
@classmethod
def service_info_iterator(cls, services):
for service in services:
servicename = service[0]
service_type, name = cls.get_service_info(servicename)
yield (service_type, name, service)
@classmethod
def get_types_and_names(cls):
types, names = [], []
for stype, name, _ in cls.service_info_iterator(cls.ALL_SERVICES):
types.append(stype)
names.append(name)
return frozenset(types), frozenset(names)
@classmethod
def filter_services(cls, services, types=None, names=None):
if types is not None:
types = frozenset(types)
if names is not None:
names = frozenset(names)
for type, name, service in cls.service_info_iterator(services):
if types is None or type in types:
if names is None or name in names:
yield service
@classmethod
def create_new_ip_getter(cls, types=None, names=None):
service_iter = cls.filter_services(cls.ALL_SERVICES, types, names)
return cls(service_iter)
# set types and names
SimpleIpGetter.SERVICE_TYPES, SimpleIpGetter.SERVICE_NAMES = \
SimpleIpGetter.get_types_and_names()
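# Usage sketch for the getter above (performs real HTTP(S) requests; the
# service order is randomized by iter_rand_service):
#   getter = SimpleIpGetter.create_new_ip_getter(types=['https'])
#   ip = getter.get(tries=3, timeout=5)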
class BadToken(Exception):
pass
class ParseError(Exception):
pass
KeyData = namedtuple("KeyData", ["name", "algorithm", "key"])
class KeyConfigParser(object):
ALGORITHMS = frozenset([
"hmac-md5", "hmac-sha1", "hmac-sha224", "hmac-sha256", "hmac-sha384",
"hmac-sha512"
])
def __init__(self):
self.keys = {}
self.states = list()
self.keys_names = []
self.current_key_name = None
self.current_key_algorithm = None
self.current_key_data = None
def get_space(self, match):
pass
def get_keyword(self, match):
text = match.group().lower()
if text == "key" and self.state is None:
self.state = "keyname"
elif text == "algorithm" and self.state == "keyblock":
self.states.append('algorithm')
elif text == "secret" and self.state == "keyblock":
self.states.append('secret')
elif self.state == "algorithm":
if text in self.ALGORITHMS:
self.current_key_algorithm = text
self.state = "waitend"
else:
raise ParseError('Bad algorithm type "{0}"'.format(text))
else:
raise ParseError(
'Bad keyword "{0}" with state "{1}"'
.format(text, str(self.state))
)
def get_string(self, match):
value, = match.groups()
if self.state == "keyname":
self.state = "waitblock"
if value not in self.keys:
# get keyname
self.current_key_name = value
else:
raise ParseError('Key "{0}" already exists'.format(value))
elif self.state == "secret":
self.current_key_data = value
self.state = "waitend"
else:
raise ParseError('Bad string {0}'.format(value))
def get_block_begin(self, match):
if self.state == "waitblock":
self.state = "keyblock"
else:
raise ParseError("Bad block")
def get_block_end(self, match):
keys_data = [
self.current_key_name, self.current_key_algorithm,
self.current_key_data
]
if None in keys_data:
raise ParseError("Bad key data {0}".format(str(keys_data)))
self.get_new_key(self.current_key_name, self.current_key_algorithm,
self.current_key_data)
self.state = "waitend"
def get_end(self, match):
if self.state == "waitend":
self.states.pop()
else:
raise ParseError("Bad end statement")
def get_new_key(self, key_name, algorithm, key_data):
key = KeyData(key_name, algorithm, key_data)
self.keys[key_name] = key
self.keys_names.append(key_name)
def get_eof(self):
if self.state is not None or len(self.keys_names) == 0:
raise ParseError("Bad keyfile")
@property
def state(self):
if not self.states:
return None
return self.states[-1]
@state.setter
def state(self, val):
if self.states:
self.states.pop()
self.states.append(val)
def get_key(self, key_name=None):
if key_name is None:
key_name = self.keys_names[0]
return self.keys[key_name]
@classmethod
def parse_keys(cls, data):
parser = cls()
KeyConfig.parse(data, parser)
return parser
class KeyConfig(object):
WHITE_SPACE_RE = re.compile("\s+")
KEYWORD_RE = re.compile("[a-z]+[a-z\d\-]*[a-z\d]+", re.I)
STRING_RE = re.compile('"([^"]*)"')
BLOCK_BEGIN_RE = re.compile('{')
BLOCK_END_RE = re.compile('}')
END_COMMAND_RE = re.compile(';')
class Tokens(object):
SPACE = 0
KEYWORD = 1
STRING = 2
BLOCK_BEGIN = 3
BLOCK_END = 4
END_COMMAND = 5
TOKENS_DATA = (
(WHITE_SPACE_RE, Tokens.SPACE),
(KEYWORD_RE, Tokens.KEYWORD),
(STRING_RE, Tokens.STRING),
(BLOCK_BEGIN_RE, Tokens.BLOCK_BEGIN),
(BLOCK_END_RE, Tokens.BLOCK_END),
(END_COMMAND_RE, Tokens.END_COMMAND)
)
@classmethod
def get_current_token(cls, data, start_pos=0):
for token_re, token_id in cls.TOKENS_DATA:
m = token_re.match(data, start_pos)
if m is not None:
return (m, token_id)
message_data = data[start_pos:start_pos + 40]
raise BadToken('Unknown token "{0}"'.format(message_data))
@classmethod
def tokenize(cls, data):
pos, l = 0, len(data)
while pos < l:
m, token_id = cls.get_current_token(data, pos)
yield (m, token_id)
pos = m.end()
@classmethod
def parse(cls, data, parser):
tokens_methods = {
cls.Tokens.SPACE: 'get_space',
cls.Tokens.KEYWORD: 'get_keyword',
cls.Tokens.STRING: 'get_string',
cls.Tokens.BLOCK_BEGIN: 'get_block_begin',
cls.Tokens.BLOCK_END: 'get_block_end',
cls.Tokens.END_COMMAND: 'get_end'
}
for m, token_id in cls.tokenize(data):
method = tokens_methods.get(token_id)
if method is None:
                raise BadToken('Unknown token "{0}"'.format(token_id))
getattr(parser, method)(m)
# end of file reached
parser.get_eof()
@staticmethod
def get_keyring(key_name, key_data):
return dns.tsigkeyring.from_text({key_name: key_data})
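# Parsing sketch for the classes above (BIND-style key statement; the
# secret is a placeholder):
#   data = 'key "mykey" { algorithm hmac-sha256; secret "c2VjcmV0"; };'
#   key = KeyConfigParser.parse_keys(data).get_key()
#   # -> KeyData(name='mykey', algorithm='hmac-sha256', key='c2VjcmV0')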
class NameUpdate(object):
DEFAULT_TTL = 600
def __init__(self, server, zone, key, keyname=None, port=53):
self.server = server
self.zone = zone
if isinstance(key, KeyData):
self.key = key
else:
self.key = self.key_from_file(key, keyname)
        self.port = port
def get_updater(self):
return self.build_updater(self.zone, self.key)
def send(self, update, timeout=7):
logger.info('send update message to server "%s",port %d, timeout %d',
self.server, self.port, timeout)
dns.query.tcp(update, self.server, timeout=timeout, port=self.port)
def update_a(self, domain, ip, resolver, ttl=DEFAULT_TTL, timeout=7):
try:
old_ip = self.check_name(domain, resolver)
except dns.resolver.NXDOMAIN:
logger.debug('domain "{0!s}" not exists'.format(domain))
updater = self.get_updater()
updater.add(domain.relativize(self.zone), ttl, 'A', ip)
self.send(updater, timeout=timeout)
return True
else:
logger.debug('domain "{0}" A: "{1}", new ip "{2}"'
.format(domain.to_text(), old_ip, ip))
if ip != old_ip:
updater = self.get_updater()
updater.replace(domain.relativize(self.zone), ttl, 'A', ip)
self.send(updater, timeout=timeout)
return True
return False
@staticmethod
def build_updater(zone, key):
keyring = KeyConfig.get_keyring(key.name, key.key)
return dns.update.Update(zone, keyring=keyring,
keyalgorithm=key.algorithm)
@staticmethod
def build_resolver_by_ip(server_ip, port=53):
new_resolver = dns.resolver.Resolver(configure=False)
new_resolver.nameservers.append(server_ip)
new_resolver.port = port
return new_resolver
@classmethod
def build_resolver(cls, server, port=53):
logger.debug('build resolver for server "%s"', server)
if validate_ipv46(server):
return cls.build_resolver_by_ip(server, port)
for rdata in dns.resolver.query(server, 'A'): # pragma: no branch
return cls.build_resolver_by_ip(rdata.address, port)
@staticmethod
def key_from_file(filename, keyname=None):
if hasattr(filename, "read"):
data = filename.read()
else:
with open(filename, 'rb') as f:
data = f.read()
return KeyConfigParser.parse_keys(data).get_key(keyname)
@staticmethod
def determine_server(zone, resolver=None):
if resolver is None:
resolver = dns.resolver.get_default_resolver()
for rdata in resolver.query(zone, 'SOA'):
return rdata.mname.to_text()
@staticmethod
def check_name(name, resolver=None):
if resolver is None:
resolver = dns.resolver.get_default_resolver()
logger.info('try resolve A record of {0!s}'.format(name))
for rdata in resolver.query(name, 'A'):
return rdata.address
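# Illustrative usage sketch (not part of the original module). It wires the
# NameUpdate helpers above together; "example.com" and the key file path are
# placeholder assumptions, and nothing here runs at import time.
def _demo_name_update():
    import dns.name
    zone = dns.name.from_text('example.com')
    name = dns.name.from_text('home.example.com')
    server = NameUpdate.determine_server(zone)      # primary NS from the SOA
    resolver = NameUpdate.build_resolver(server)    # resolver used for checks
    key = NameUpdate.key_from_file('Kexample.key')  # keyname omitted
    updater = NameUpdate(server, zone, key=key)
    # Sends a signed update only when the A record is missing or stale.
    return updater.update_a(name, '203.0.113.7', resolver)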
def comma_separated_list(values):
check_values = frozenset(values)
def parse(input_str):
res = frozenset(input_str.split(','))
diff = res - check_values
if diff:
raise argparse.ArgumentTypeError(
"Bad input {0}".format(str(tuple(diff)))
)
return res
return parse
def integer_range(min=None, max=None, num_type=int):
def validate_int(value):
try:
res = num_type(value)
except ValueError:
raise argparse.ArgumentTypeError('Invalid integer "%s"' % value)
else:
if min is not None and res < min:
raise argparse.ArgumentTypeError(
'Value "{val}" is smaller than "{min}"'
.format(val=res, min=min)
)
elif max is not None and res > max:
raise argparse.ArgumentTypeError(
'Value "{val}" is bigger than "{max}"'
.format(val=res, max=max)
)
return res
return validate_int
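# A minimal sketch (not in the original file) of how the two validator
# factories above plug into argparse; the option names here are examples.
def _demo_validators():
    import argparse
    demo = argparse.ArgumentParser()
    demo.add_argument('--tries', type=integer_range(min=1), default=5)
    demo.add_argument('--types', type=comma_separated_list(('http', 'https')))
    ns = demo.parse_args(['--tries', '3', '--types', 'http,https'])
    assert ns.tries == 3 and ns.types == frozenset(['http', 'https'])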
class Program(object):
COMMANDS = {
'checkip': 'checkip_command',
'update': 'update_command'
}
VERBOSITY = (
logging.ERROR,
logging.WARNING,
logging.INFO,
logging.DEBUG,
)
SERVICE_TYPES = frozenset(['http', 'https'])
def __init__(self):
self.parser = self.build_parser()
def run(self, args=None, log=True):
if log and not logger.handlers:
self.set_loghandler()
namespace = self.parser.parse_args(args)
self.execute(namespace, log=log)
def set_verbosity(self, verbosity):
count = len(self.VERBOSITY)
if verbosity >= count:
verbosity = count - 1
logger.setLevel(self.VERBOSITY[verbosity])
def set_loghandler(self):
logger.addHandler(logging.StreamHandler())
def execute(self, namespace, log=True):
exec_command = self.COMMANDS.get(namespace.command)
if exec_command is not None:
if log:
self.set_verbosity(namespace.verbose)
logger.debug('args namespace "%s"', namespace)
kwargs = vars(namespace)
del kwargs['command']
del kwargs['verbose']
getattr(self, exec_command)(**kwargs)
else:
raise ValueError("Bad command {0}".format(namespace.command))
def checkip_command(self, *args, **kwargs):
ip_fun = self.ip_fun(*args, **kwargs)
try:
print(ip_fun())
except IpFetchError as e:
self.service_error(str(e))
def get_key(self, keyfile, keyname=None):
try:
key = NameUpdate.key_from_file(keyfile, keyname)
except ParseError:
self.parser.error("Bad key format")
except KeyError:
self.parser.error("Bad key name")
else:
return key
def update_command(self, name, keyfile, keyname, zone=None, server=None,
tries=5, timeout=5, types=None, services=None,
ttl=NameUpdate.DEFAULT_TTL, **kwargs):
ip_fun = self.ip_fun(tries=tries, timeout=timeout, types=types,
services=services)
name = dns.name.from_text(name)
key = self.get_key(keyfile, keyname)
if zone is None:
zone = self.determine_zone(name)
logger.info('determine zone by name: zone "{0}" name "{1}"'
.format(zone.to_text(), name.to_text()))
else:
zone = dns.name.from_text(zone)
if server is None:
server = NameUpdate.determine_server(zone)
logger.info('determine server by zone: server "{0}" zone "{1}"'
.format(server, zone.to_text()))
try:
ip = ip_fun()
except IpFetchError as e:
self.service_error(str(e))
else:
try:
resolver = NameUpdate.build_resolver(server)
nu = NameUpdate(server, zone, key=key)
nu.update_a(name, ip, resolver, ttl)
except dns.exception.Timeout:
self.service_error("Timeout when sending dns query")
def determine_zone(self, name):
try:
return dns.resolver.zone_for_name(name)
except dns.exception.Timeout:
self.service_error("Could not determine zone name")
def service_error(self, message):
self.parser.exit(69, message + '\n')
def ip_fun(self, tries=5, timeout=5, types=None, services=None):
try:
ip_get = SimpleIpGetter.create_new_ip_getter(types, services)
except ValueError as e:
self.parser.error(str(e))
return partial(ip_get.get, tries=tries, timeout=timeout)
@classmethod
def ip_arguments(cls, parser):
tries_type = integer_range(min=1)
timeout_type = integer_range(min=0, num_type=float)
types_type = comma_separated_list(SimpleIpGetter.SERVICE_TYPES)
services_type = comma_separated_list(SimpleIpGetter.SERVICE_NAMES)
parser.add_argument('-n', '--tries', dest="tries", type=tries_type,
default=5)
parser.add_argument('-t', '--types', dest="types", type=types_type,
default=None)
parser.add_argument('--services', dest="services", type=services_type,
default=None)
parser.add_argument('--timeout', dest="timeout", type=timeout_type,
default=5)
@classmethod
def build_parser(cls):
file_type = argparse.FileType('rb')
parser = argparse.ArgumentParser(description="dynamic dns update")
parser.add_argument('-v', '--verbose', action='count', dest='verbose',
default=0)
subparsers = parser.add_subparsers(dest="command")
checkip_parser = subparsers.add_parser('checkip', help="return ip")
cls.ip_arguments(checkip_parser)
update_parser = subparsers.add_parser('update', help="update record")
update_parser.add_argument('-k', '--key', type=file_type,
required=True, dest='keyfile')
update_parser.add_argument('--keyname', type=str, dest='keyname',
default=None)
update_parser.add_argument('--ttl', type=int, dest='ttl', default=600)
update_parser.add_argument('-s', '--server', type=str, dest='server',
default=None)
update_parser.add_argument('-z', '--zone', type=str, dest='zone',
default=None)
update_parser.add_argument('name', type=str)
cls.ip_arguments(update_parser)
return parser
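# Example invocations the parser above accepts (illustrative only; the
# installed entry-point name `dynsupdate` is an assumption):
#   dynsupdate -v checkip --types https -n 3
#   dynsupdate update -k Kexample.key -z example.com home.example.com
# Parsing can be sanity-checked without running a command:
#   ns = Program().parser.parse_args(['checkip', '-n', '3'])
#   assert ns.command == 'checkip' and ns.tries == 3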
def main(): # pragma: no cover
Program().run()
if __name__ == "__main__": # pragma: no cover
main()
|
{
"content_hash": "b6856015d8c86cbbca6fcee48a2e91da",
"timestamp": "",
"source": "github",
"line_count": 680,
"max_line_length": 78,
"avg_line_length": 31.28235294117647,
"alnum_prop": 0.5683527641970666,
"repo_name": "bacher09/dynsupdate",
"id": "e1e192f8ee938bdee13d9c0195a56564f9cbf855",
"size": "21272",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dynsupdate/client.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "48386"
}
],
"symlink_target": ""
}
|
import datetime
import json
import os
import shutil
from django.conf import settings
from django.core.files.storage import default_storage as storage
from django.core.urlresolvers import reverse
import mock
from nose.tools import eq_, ok_
from pyquery import PyQuery as pq
import amo
import amo.tests
import mkt
from amo.tests import formset, initial
from amo.tests.test_helpers import get_image_path
from mkt.constants.applications import DEVICE_TYPES
from mkt.files.tests.test_models import UploadTest as BaseUploadTest
from mkt.reviewers.models import EscalationQueue
from mkt.site.fixtures import fixture
from mkt.submit.decorators import read_dev_agreement_required
from mkt.submit.forms import AppFeaturesForm, NewWebappVersionForm
from mkt.submit.models import AppSubmissionChecklist
from mkt.translations.models import Translation
from mkt.users.models import UserNotification, UserProfile
from mkt.users.notifications import app_surveys
from mkt.webapps.models import AddonDeviceType, AddonUser, AppFeatures, Webapp
class TestSubmit(amo.tests.TestCase):
fixtures = fixture('user_999')
def setUp(self):
self.fi_mock = mock.patch(
'mkt.developers.tasks.fetch_icon').__enter__()
self.user = self.get_user()
assert self.client.login(username=self.user.email, password='password')
def tearDown(self):
self.fi_mock.__exit__()
def get_user(self):
return UserProfile.objects.get(username='regularuser')
def get_url(self, url):
return reverse('submit.app.%s' % url, args=[self.webapp.app_slug])
def _test_anonymous(self):
self.client.logout()
r = self.client.get(self.url, follow=True)
self.assertLoginRedirects(r, self.url)
def _test_progress_display(self, completed, current):
"""Test that the correct steps are highlighted."""
r = self.client.get(self.url)
progress = pq(r.content)('#submission-progress')
# Check the completed steps.
completed_found = progress.find('.completed')
for idx, step in enumerate(completed):
li = completed_found.eq(idx)
eq_(li.text(), unicode(mkt.APP_STEPS_TITLE[step]))
# Check that we link back to the Developer Agreement.
terms_link = progress.find('.terms a')
if 'terms' in completed:
eq_(terms_link.attr('href'),
reverse('mkt.developers.docs', args=['policies', 'agreement']))
else:
eq_(terms_link.length, 0)
# Check the current step.
eq_(progress.find('.current').text(),
unicode(mkt.APP_STEPS_TITLE[current]))
class TestProceed(TestSubmit):
def setUp(self):
super(TestProceed, self).setUp()
self.user.update(read_dev_agreement=None)
self.url = reverse('submit.app')
def test_is_authenticated(self):
# Redirect user to Terms.
r = self.client.get(self.url)
self.assert3xx(r, reverse('submit.app.terms'))
def test_is_anonymous(self):
# Show the user the Terms page, but with the login prompt.
self.client.logout()
r = self.client.get(self.url)
eq_(r.status_code, 200)
eq_(r.context['proceed'], True)
class TestTerms(TestSubmit):
def setUp(self):
super(TestTerms, self).setUp()
self.user.update(read_dev_agreement=None)
self.url = reverse('submit.app.terms')
def test_anonymous(self):
self.client.logout()
r = self.client.get(self.url, follow=True)
self.assertLoginRedirects(r, self.url)
def test_jump_to_step(self):
r = self.client.get(reverse('submit.app'), follow=True)
self.assert3xx(r, self.url)
def test_page(self):
r = self.client.get(self.url)
eq_(r.status_code, 200)
doc = pq(r.content)('#submit-terms')
eq_(doc.length, 1)
eq_(doc.find('input[name=newsletter]').siblings('label').length, 1,
'Missing its <label>!')
def test_progress_display(self):
self._test_progress_display([], 'terms')
@mock.patch('basket.subscribe')
def test_agree(self, subscribe_mock):
self.client.post(self.url, {'read_dev_agreement': True})
dt = self.get_user().read_dev_agreement
self.assertCloseToNow(dt)
eq_(UserNotification.objects.count(), 0)
assert not subscribe_mock.called
@mock.patch('basket.subscribe')
def test_agree_and_sign_me_up(self, subscribe_mock):
self.client.post(self.url, {'read_dev_agreement':
datetime.datetime.now(),
'newsletter': True})
dt = self.get_user().read_dev_agreement
self.assertCloseToNow(dt)
eq_(UserNotification.objects.count(), 1)
notes = UserNotification.objects.filter(user=self.user, enabled=True,
notification_id=app_surveys.id)
eq_(notes.count(), 1, 'Expected to be subscribed to the newsletter')
subscribe_mock.assert_called_with(
self.user.email, 'app-dev', lang='en-US',
country='restofworld', format='H',
source_url='http://testserver/developers/submit')
def test_disagree(self):
r = self.client.post(self.url)
eq_(r.status_code, 200)
eq_(self.user.read_dev_agreement, None)
eq_(UserNotification.objects.count(), 0)
def test_read_dev_agreement_required(self):
f = mock.Mock()
f.__name__ = 'function'
request = mock.Mock()
request.user.read_dev_agreement = None
request.get_full_path.return_value = self.url
func = read_dev_agreement_required(f)
res = func(request)
assert not f.called
eq_(res.status_code, 302)
eq_(res['Location'], reverse('submit.app'))
class TestManifest(TestSubmit):
def setUp(self):
super(TestManifest, self).setUp()
self.user.update(read_dev_agreement=None)
self.url = reverse('submit.app')
def _step(self):
self.user.update(read_dev_agreement=datetime.datetime.now())
def test_anonymous(self):
r = self.client.get(self.url, follow=True)
eq_(r.context['step'], 'terms')
def test_cannot_skip_prior_step(self):
r = self.client.get(self.url, follow=True)
# And we start back at one...
self.assert3xx(r, reverse('submit.app.terms'))
def test_jump_to_step(self):
# I already read the Terms.
self._step()
# So jump me to the Manifest step.
r = self.client.get(reverse('submit.app'), follow=True)
eq_(r.context['step'], 'manifest')
def test_legacy_redirects(self):
def check():
for before, status in redirects:
r = self.client.get(before, follow=True)
self.assert3xx(r, dest, status)
# I haven't read the dev agreement.
redirects = (
('/developers/submit/', 302),
('/developers/submit/app', 302),
('/developers/submit/app/terms', 302),
('/developers/submit/app/manifest', 302),
)
dest = '/developers/submit/terms'
check()
# I have read the dev agreement.
self._step()
redirects = (
('/developers/submit/app', 302),
('/developers/submit/app/terms', 302),
('/developers/submit/app/manifest', 302),
('/developers/submit/manifest', 301),
)
dest = '/developers/submit/'
check()
def test_page(self):
self._step()
r = self.client.get(self.url)
eq_(r.status_code, 200)
eq_(pq(r.content)('#upload-file').length, 1)
def test_progress_display(self):
self._step()
self._test_progress_display(['terms'], 'manifest')
class UploadAddon(object):
def post(self, expect_errors=False, data=None):
if data is None:
data = {'free_platforms': ['free-desktop']}
data.update(upload=self.upload.pk)
r = self.client.post(self.url, data, follow=True)
eq_(r.status_code, 200)
if not expect_errors:
# Show any unexpected form errors.
if r.context and 'form' in r.context:
eq_(r.context['form'].errors, {})
return r
class BaseWebAppTest(BaseUploadTest, UploadAddon, amo.tests.TestCase):
fixtures = fixture('user_999', 'user_10482')
def setUp(self):
super(BaseWebAppTest, self).setUp()
self.manifest = self.manifest_path('mozball.webapp')
self.manifest_url = 'http://allizom.org/mozball.webapp'
self.upload = self.get_upload(abspath=self.manifest,
user=UserProfile.objects.get(pk=999))
self.upload.update(name=self.manifest_url)
self.url = reverse('submit.app')
assert self.client.login(username='regular@mozilla.com',
password='password')
def post_addon(self, data=None):
eq_(Webapp.objects.count(), 0)
self.post(data=data)
return Webapp.objects.get()
class TestCreateWebApp(BaseWebAppTest):
@mock.patch('mkt.developers.tasks.fetch_icon')
def test_post_app_redirect(self, fi_mock):
r = self.post()
webapp = Webapp.objects.get()
self.assert3xx(r,
reverse('submit.app.details', args=[webapp.app_slug]))
assert fi_mock.delay.called, (
'The fetch_icon task was expected to be called')
def test_no_hint(self):
self.post_addon()
self.upload = self.get_upload(abspath=self.manifest)
r = self.client.post(reverse('mkt.developers.upload_manifest'),
dict(manifest=self.manifest_url), follow=True)
eq_(r.status_code, 200)
assert 'already submitted' not in r.content, (
'Unexpected helpful error (trap_duplicate)')
assert 'already exists' not in r.content, (
'Unexpected validation error (verify_app_domain)')
def test_no_upload(self):
data = {'free_platforms': ['free-desktop']}
res = self.client.post(self.url, data, follow=True)
eq_(res.context['form'].errors,
{'upload': NewWebappVersionForm.upload_error})
@mock.patch('mkt.developers.tasks.fetch_icon')
def test_bad_upload(self, fi_mock):
data = {'free_platforms': ['free-desktop'], 'upload': 'foo'}
res = self.client.post(self.url, data, follow=True)
eq_(res.context['form'].errors,
{'upload': NewWebappVersionForm.upload_error})
assert not fi_mock.delay.called, (
'The fetch_icon task was not expected to be called')
def test_hint_for_same_manifest(self):
self.create_switch(name='webapps-unique-by-domain')
self.post_addon()
self.upload = self.get_upload(abspath=self.manifest)
r = self.client.post(reverse('mkt.developers.upload_manifest'),
dict(manifest=self.manifest_url))
data = json.loads(r.content)
assert 'Oops' in data['validation']['messages'][0]['message'], (
'Expected oops')
def test_no_hint_for_same_manifest_different_author(self):
self.create_switch(name='webapps-unique-by-domain')
self.post_addon()
# Submit same manifest as different user.
assert self.client.login(username='clouserw@gmail.com',
password='password')
self.upload = self.get_upload(abspath=self.manifest)
r = self.client.post(reverse('mkt.developers.upload_manifest'),
dict(manifest=self.manifest_url))
data = json.loads(r.content)
eq_(data['validation']['messages'][0]['message'],
'An app already exists on this domain; only one app per domain is '
'allowed.')
def test_app_from_uploaded_manifest(self):
addon = self.post_addon()
eq_(addon.is_packaged, False)
assert addon.guid is not None, (
'Expected app to have a UUID assigned to guid')
eq_(unicode(addon.name), u'MozillaBall ょ')
eq_(addon.slug, 'app-%s' % addon.id)
eq_(addon.app_slug, u'mozillaball-ょ')
eq_(addon.description, u'Exciting Open Web development action!')
eq_(addon.manifest_url, u'http://allizom.org/mozball.webapp')
eq_(addon.app_domain, u'http://allizom.org')
eq_(Translation.objects.get(id=addon.description.id, locale='it'),
u'Azione aperta emozionante di sviluppo di fotoricettore!')
eq_(addon.latest_version.developer_name, 'Mozilla Labs')
eq_(addon.latest_version.manifest,
json.loads(open(self.manifest).read()))
def test_manifest_with_any_extension(self):
self.manifest = os.path.join(settings.ROOT, 'mkt', 'developers',
'tests', 'addons', 'mozball.owa')
self.upload = self.get_upload(abspath=self.manifest,
user=UserProfile.objects.get(pk=999))
addon = self.post_addon()
ok_(addon.id)
def test_version_from_uploaded_manifest(self):
addon = self.post_addon()
eq_(addon.latest_version.version, '1.0')
def test_file_from_uploaded_manifest(self):
addon = self.post_addon()
files = addon.latest_version.files.all()
eq_(len(files), 1)
eq_(files[0].status, amo.STATUS_PENDING)
def test_set_platform(self):
app = self.post_addon(
{'free_platforms': ['free-android-tablet', 'free-desktop']})
self.assertSetEqual(app.device_types,
[amo.DEVICE_TABLET, amo.DEVICE_DESKTOP])
def test_free(self):
app = self.post_addon({'free_platforms': ['free-firefoxos']})
self.assertSetEqual(app.device_types, [amo.DEVICE_GAIA])
eq_(app.premium_type, amo.ADDON_FREE)
def test_premium(self):
app = self.post_addon({'paid_platforms': ['paid-firefoxos']})
self.assertSetEqual(app.device_types, [amo.DEVICE_GAIA])
eq_(app.premium_type, amo.ADDON_PREMIUM)
def test_supported_locales(self):
addon = self.post_addon()
eq_(addon.default_locale, 'en-US')
eq_(addon.versions.latest().supported_locales, 'es,it')
def test_short_locale(self):
# This manifest has a locale code of "pt" which is in the
# SHORTER_LANGUAGES setting and should get converted to "pt-PT".
self.manifest = self.manifest_path('short-locale.webapp')
self.upload = self.get_upload(abspath=self.manifest,
user=UserProfile.objects.get(pk=999))
addon = self.post_addon()
eq_(addon.default_locale, 'pt-PT')
eq_(addon.versions.latest().supported_locales, 'es')
def test_unsupported_detail_locale(self):
# This manifest has a locale code of "en-GB" which is unsupported, so
# we default to "en-US".
self.manifest = self.manifest_path('unsupported-default-locale.webapp')
self.upload = self.get_upload(abspath=self.manifest,
user=UserProfile.objects.get(pk=999))
addon = self.post_addon()
eq_(addon.default_locale, 'en-US')
eq_(addon.versions.latest().supported_locales, 'es,it')
def test_appfeatures_creation(self):
addon = self.post_addon(data={
'free_platforms': ['free-desktop'],
'has_contacts': 'on'
})
features = addon.latest_version.features
ok_(isinstance(features, AppFeatures))
field_names = [f.name for f in AppFeaturesForm().all_fields()]
for field in field_names:
expected = field == 'has_contacts'
eq_(getattr(features, field), expected)
class TestCreateWebAppFromManifest(BaseWebAppTest):
def setUp(self):
super(TestCreateWebAppFromManifest, self).setUp()
Webapp.objects.create(app_slug='xxx',
app_domain='http://existing-app.com')
def upload_webapp(self, manifest_url, **post_kw):
self.upload.update(name=manifest_url) # Simulate JS upload.
return self.post(**post_kw)
def post_manifest(self, manifest_url):
rs = self.client.post(reverse('mkt.developers.upload_manifest'),
dict(manifest=manifest_url))
if 'json' in rs['content-type']:
rs = json.loads(rs.content)
return rs
def test_duplicate_domain(self):
self.create_switch(name='webapps-unique-by-domain')
rs = self.upload_webapp('http://existing-app.com/my.webapp',
expect_errors=True)
eq_(rs.context['form'].errors,
{'upload':
['An app already exists on this domain; only one '
'app per domain is allowed.']})
def test_allow_duplicate_domains(self):
self.upload_webapp('http://existing-app.com/my.webapp') # No errors.
def test_duplicate_domain_from_js(self):
self.create_switch(name='webapps-unique-by-domain')
data = self.post_manifest('http://existing-app.com/my.webapp')
eq_(data['validation']['errors'], 1)
eq_(data['validation']['messages'][0]['message'],
'An app already exists on this domain; '
'only one app per domain is allowed.')
def test_allow_duplicate_domains_from_js(self):
rs = self.post_manifest('http://existing-app.com/my.webapp')
eq_(rs.status_code, 302)
class BasePackagedAppTest(BaseUploadTest, UploadAddon, amo.tests.TestCase):
fixtures = fixture('webapp_337141', 'user_999')
def setUp(self):
super(BasePackagedAppTest, self).setUp()
self.app = Webapp.objects.get(pk=337141)
self.app.update(is_packaged=True)
self.version = self.app.latest_version
self.file = self.version.all_files[0]
self.file.update(filename='mozball.zip')
self.upload = self.get_upload(abspath=self.package,
user=UserProfile.objects.get(pk=999))
self.upload.update(name='mozball.zip')
self.url = reverse('submit.app')
assert self.client.login(username='regular@mozilla.com',
password='password')
@property
def package(self):
return self.packaged_app_path('mozball.zip')
def post_addon(self, data=None):
eq_(Webapp.objects.count(), 1)
self.post(data=data)
return Webapp.objects.order_by('-id')[0]
def setup_files(self, filename='mozball.zip'):
# Make sure the source file is there.
# Original packaged file.
if not storage.exists(self.file.file_path):
try:
# We don't care if these dirs exist.
os.makedirs(os.path.dirname(self.file.file_path))
except OSError:
pass
shutil.copyfile(self.packaged_app_path(filename),
self.file.file_path)
# Signed packaged file.
if not storage.exists(self.file.signed_file_path):
try:
# We don't care if these dirs exist.
os.makedirs(os.path.dirname(self.file.signed_file_path))
except OSError:
pass
shutil.copyfile(self.packaged_app_path(filename),
self.file.signed_file_path)
class TestEscalatePrereleaseWebApp(BasePackagedAppTest):
def setUp(self):
super(TestEscalatePrereleaseWebApp, self).setUp()
UserProfile.objects.create(email=settings.NOBODY_EMAIL_ADDRESS)
def post(self):
super(TestEscalatePrereleaseWebApp, self).post(data={
'free_platforms': ['free-firefoxos'],
'packaged': True,
})
def test_prerelease_permissions_get_escalated(self):
validation = json.loads(self.upload.validation)
validation['permissions'] = ['moz-attention']
self.upload.update(validation=json.dumps(validation))
eq_(EscalationQueue.objects.count(), 0)
self.post()
eq_(EscalationQueue.objects.count(), 1)
def test_normal_permissions_dont_get_escalated(self):
validation = json.loads(self.upload.validation)
validation['permissions'] = ['contacts']
self.upload.update(validation=json.dumps(validation))
eq_(EscalationQueue.objects.count(), 0)
self.post()
eq_(EscalationQueue.objects.count(), 0)
class TestCreatePackagedApp(BasePackagedAppTest):
@mock.patch('mkt.webapps.models.Webapp.get_cached_manifest')
def test_post_app_redirect(self, _mock):
res = self.post()
webapp = Webapp.objects.order_by('-created')[0]
self.assert3xx(res,
reverse('submit.app.details', args=[webapp.app_slug]))
@mock.patch('mkt.webapps.models.Webapp.get_cached_manifest')
@mock.patch('mkt.submit.forms.verify_app_domain')
def test_app_from_uploaded_package(self, _verify, _mock):
addon = self.post_addon(
data={'packaged': True, 'free_platforms': ['free-firefoxos']})
eq_(addon.latest_version.version, '1.0')
eq_(addon.is_packaged, True)
assert addon.guid is not None, (
'Expected app to have a UUID assigned to guid')
eq_(unicode(addon.name), u'Packaged MozillaBall ょ')
eq_(addon.slug, 'app-%s' % addon.id)
eq_(addon.app_slug, u'packaged-mozillaball-ょ')
eq_(addon.description, u'Exciting Open Web development action!')
eq_(addon.manifest_url, None)
eq_(addon.app_domain, 'app://hy.fr')
eq_(Translation.objects.get(id=addon.description.id, locale='it'),
u'Azione aperta emozionante di sviluppo di fotoricettore!')
eq_(addon.latest_version.developer_name, 'Mozilla Labs')
assert _verify.called, (
'`verify_app_domain` should be called for packaged apps with '
'origins.')
@mock.patch('mkt.webapps.models.Webapp.get_cached_manifest')
def test_packaged_app_not_unique(self, _mock):
Webapp.objects.create(is_packaged=True, app_domain='app://hy.fr')
res = self.post(
data={'packaged': True, 'free_platforms': ['free-firefoxos']},
expect_errors=True)
eq_(res.context['form'].errors, {
'upload': ['An app already exists on this domain; only one app '
'per domain is allowed.']})
class TestDetails(TestSubmit):
fixtures = fixture('webapp_337141', 'user_999', 'user_10482')
def setUp(self):
super(TestDetails, self).setUp()
self.webapp = self.get_webapp()
self.webapp.update(status=amo.STATUS_NULL)
self.url = reverse('submit.app.details', args=[self.webapp.app_slug])
self.cat1 = 'books'
def get_webapp(self):
return Webapp.objects.get(id=337141)
def upload_preview(self, image_file=None):
if not image_file:
image_file = get_image_path('preview.jpg')
return self._upload_image(self.webapp.get_dev_url('upload_preview'),
image_file=image_file)
def upload_icon(self, image_file=None):
if not image_file:
image_file = get_image_path('mozilla-sq.png')
return self._upload_image(self.webapp.get_dev_url('upload_icon'),
image_file=image_file)
def _upload_image(self, url, image_file):
with open(image_file, 'rb') as data:
rp = self.client.post(url, {'upload_image': data})
eq_(rp.status_code, 200)
hash_ = json.loads(rp.content)['upload_hash']
assert hash_, 'No hash: %s' % rp.content
return hash_
def _step(self):
self.user.update(read_dev_agreement=datetime.datetime.now())
self.cl = AppSubmissionChecklist.objects.create(addon=self.webapp,
terms=True, manifest=True)
# Associate app with user.
AddonUser.objects.create(addon=self.webapp, user=self.user)
# Associate device type with app.
self.dtype = DEVICE_TYPES.values()[0]
AddonDeviceType.objects.create(addon=self.webapp,
device_type=self.dtype.id)
self.device_types = [self.dtype]
# Associate category with app.
self.webapp.update(categories=[self.cat1])
def test_anonymous(self):
self._test_anonymous()
def test_resume_later(self):
self._step()
self.webapp.appsubmissionchecklist.update(details=True)
r = self.client.get(reverse('submit.app.resume',
args=[self.webapp.app_slug]))
self.assert3xx(r, self.webapp.get_dev_url('edit'))
def test_not_owner(self):
self._step()
assert self.client.login(username='clouserw@gmail.com',
password='password')
eq_(self.client.get(self.url).status_code, 403)
def test_page(self):
self._step()
r = self.client.get(self.url)
eq_(r.status_code, 200)
eq_(pq(r.content)('#submit-details').length, 1)
def test_progress_display(self):
self._step()
self._test_progress_display(['terms', 'manifest'], 'details')
def new_preview_formset(self, *args, **kw):
ctx = self.client.get(self.url).context
blank = initial(ctx['form_previews'].forms[-1])
blank.update(**kw)
return blank
def preview_formset(self, *args, **kw):
kw.setdefault('initial_count', 0)
kw.setdefault('prefix', 'files')
fs = formset(*[a for a in args] + [self.new_preview_formset()], **kw)
return dict([(k, '' if v is None else v) for k, v in fs.items()])
def get_dict(self, **kw):
data = {
'app_slug': 'testname',
'description': 'desc',
'privacy_policy': 'XXX <script>alert("xss")</script>',
'homepage': 'http://www.goodreads.com/user/show/7595895-krupa',
'support_url': 'http://www.goodreads.com/user_challenges/351558',
'support_email': 'krupa+to+the+rescue@goodreads.com',
'categories': [self.cat1],
'flash': '1',
'publish_type': amo.PUBLISH_IMMEDIATE,
'notes': 'yes'
}
# Add the required screenshot.
data.update(self.preview_formset({
'upload_hash': '<hash>',
'position': 0
}))
data.update(**kw)
# Remove fields without values.
data = dict((k, v) for k, v in data.iteritems() if v is not None)
return data
def check_dict(self, data=None, expected=None):
if data is None:
data = self.get_dict()
addon = self.get_webapp()
# Build a dictionary of expected results.
expected_data = {
'app_slug': 'testname',
'description': 'desc',
'privacy_policy': 'XXX <script>alert("xss")</script>',
'uses_flash': True,
'publish_type': amo.PUBLISH_IMMEDIATE,
}
if expected:
expected_data.update(expected)
uses_flash = expected_data.pop('uses_flash')
eq_(addon.latest_version.all_files[0].uses_flash, uses_flash)
self.assertSetEqual(addon.device_types, self.device_types)
for field, expected in expected_data.iteritems():
got = unicode(getattr(addon, field))
expected = unicode(expected)
eq_(got, expected,
'Expected %r for %r. Got %r.' % (expected, field, got))
@mock.patch('mkt.submit.views.record_action')
def test_success(self, record_action):
self._step()
data = self.get_dict()
r = self.client.post(self.url, data)
self.assertNoFormErrors(r)
self.check_dict(data=data)
self.webapp = self.get_webapp()
self.assert3xx(r, self.get_url('done'))
eq_(self.webapp.status, amo.STATUS_NULL)
assert record_action.called
def test_success_paid(self):
self._step()
self.webapp = self.get_webapp()
self.make_premium(self.webapp)
data = self.get_dict()
r = self.client.post(self.url, data)
self.assertNoFormErrors(r)
self.check_dict(data=data)
self.webapp = self.get_webapp()
self.assert3xx(r, self.get_url('done'))
eq_(self.webapp.status, amo.STATUS_NULL)
eq_(self.webapp.highest_status, amo.STATUS_PENDING)
def test_success_prefill_device_types_if_empty(self):
"""
The new submission flow asks for device types at step one.
This ensures that existing incomplete apps still have device
compatibility.
"""
self._step()
AddonDeviceType.objects.all().delete()
self.device_types = amo.DEVICE_TYPES.values()
data = self.get_dict()
r = self.client.post(self.url, data)
self.assertNoFormErrors(r)
self.check_dict(data=data)
self.webapp = self.get_webapp()
self.assert3xx(r, self.get_url('done'))
def test_success_for_approved(self):
self._step()
data = self.get_dict(publish_type=amo.PUBLISH_PRIVATE)
r = self.client.post(self.url, data)
self.assertNoFormErrors(r)
self.check_dict(data=data,
expected={'publish_type': amo.PUBLISH_PRIVATE})
self.webapp = self.get_webapp()
self.assert3xx(r, self.get_url('done'))
def test_media_types(self):
self._step()
res = self.client.get(self.url)
doc = pq(res.content)
eq_(doc('.screenshot_upload').attr('data-allowed-types'),
'image/jpeg|image/png|video/webm')
eq_(doc('#id_icon_upload').attr('data-allowed-types'),
'image/jpeg|image/png')
def test_screenshot(self):
self._step()
im_hash = self.upload_preview()
data = self.get_dict()
data.update(self.preview_formset({
'upload_hash': im_hash,
'position': 0
}))
rp = self.client.post(self.url, data)
eq_(rp.status_code, 302)
ad = Webapp.objects.get(pk=self.webapp.pk)
eq_(ad.previews.all().count(), 1)
def test_icon(self):
self._step()
im_hash = self.upload_icon()
data = self.get_dict()
data['icon_upload_hash'] = im_hash
data['icon_type'] = 'image/png'
rp = self.client.post(self.url, data)
eq_(rp.status_code, 302)
ad = self.get_webapp()
eq_(ad.icon_type, 'image/png')
for size in amo.APP_ICON_SIZES:
fn = '%s-%s.png' % (ad.id, size)
assert os.path.exists(os.path.join(ad.get_icon_dir(), fn)), (
'Expected %s in %s' % (fn, os.listdir(ad.get_icon_dir())))
def test_screenshot_or_video_required(self):
self._step()
data = self.get_dict()
for k in data:
if k.startswith('files') and k.endswith('upload_hash'):
data[k] = ''
rp = self.client.post(self.url, data)
eq_(rp.context['form_previews'].non_form_errors(),
['You must upload at least one screenshot or video.'])
def test_unsaved_screenshot(self):
self._step()
# If there are form errors we should still pass the previews URIs.
preview_type = 'video/webm'
preview_uri = 'moz-filedata:p00p'
data = self.preview_formset({
'position': 1,
'upload_hash': '<hash_one>',
'unsaved_image_type': preview_type,
'unsaved_image_data': preview_uri
})
r = self.client.post(self.url, data)
eq_(r.status_code, 200)
form = pq(r.content)('form')
eq_(form.find('input[name=files-0-unsaved_image_type]').val(),
preview_type)
eq_(form.find('input[name=files-0-unsaved_image_data]').val(),
preview_uri)
def test_unique_allowed(self):
self._step()
r = self.client.post(self.url, self.get_dict(name=self.webapp.name))
self.assertNoFormErrors(r)
app = Webapp.objects.exclude(app_slug=self.webapp.app_slug)[0]
self.assert3xx(r, reverse('submit.app.done', args=[app.app_slug]))
eq_(self.get_webapp().status, amo.STATUS_NULL)
def test_slug_invalid(self):
self._step()
# Submit an invalid slug.
d = self.get_dict(app_slug='slug!!! aksl23%%')
r = self.client.post(self.url, d)
eq_(r.status_code, 200)
self.assertFormError(r, 'form_basic', 'app_slug',
"Enter a valid 'slug' consisting of letters, numbers, underscores "
"or hyphens.")
def test_slug_required(self):
self._step()
r = self.client.post(self.url, self.get_dict(app_slug=''))
eq_(r.status_code, 200)
self.assertFormError(r, 'form_basic', 'app_slug',
'This field is required.')
def test_description_required(self):
self._step()
r = self.client.post(self.url, self.get_dict(description=''))
eq_(r.status_code, 200)
self.assertFormError(r, 'form_basic', 'description',
'This field is required.')
def test_privacy_policy_required(self):
self._step()
r = self.client.post(self.url, self.get_dict(privacy_policy=None))
self.assertFormError(r, 'form_basic', 'privacy_policy',
'This field is required.')
def test_clashing_locale(self):
self.webapp.default_locale = 'de'
self.webapp.save()
self._step()
self.client.cookies['current_locale'] = 'en-us'
data = self.get_dict(name=None, name_de='Test name',
privacy_policy=None,
**{'privacy_policy_en-us': 'XXX'})
r = self.client.post(self.url, data)
self.assertNoFormErrors(r)
def test_homepage_url_optional(self):
self._step()
r = self.client.post(self.url, self.get_dict(homepage=None))
self.assertNoFormErrors(r)
def test_homepage_url_invalid(self):
self._step()
r = self.client.post(self.url, self.get_dict(homepage='xxx'))
self.assertFormError(r, 'form_basic', 'homepage', 'Enter a valid URL.')
def test_support_url_optional(self):
self._step()
r = self.client.post(self.url, self.get_dict(support_url=None))
self.assertNoFormErrors(r)
def test_support_url_invalid(self):
self._step()
r = self.client.post(self.url, self.get_dict(support_url='xxx'))
self.assertFormError(r, 'form_basic', 'support_url',
'Enter a valid URL.')
def test_support_email_required(self):
self._step()
r = self.client.post(self.url, self.get_dict(support_email=None))
self.assertFormError(r, 'form_basic', 'support_email',
'This field is required.')
def test_support_email_invalid(self):
self._step()
r = self.client.post(self.url, self.get_dict(support_email='xxx'))
self.assertFormError(r, 'form_basic', 'support_email',
'Enter a valid email address.')
def test_categories_required(self):
self._step()
r = self.client.post(self.url, self.get_dict(categories=[]))
eq_(r.context['form_cats'].errors['categories'],
['This field is required.'])
def test_categories_max(self):
self._step()
eq_(amo.MAX_CATEGORIES, 2)
cat2 = 'games'
cat3 = 'social'
cats = [self.cat1, cat2, cat3]
r = self.client.post(self.url, self.get_dict(categories=cats))
eq_(r.context['form_cats'].errors['categories'],
['You can have only 2 categories.'])
def _post_cats(self, cats):
self.client.post(self.url, self.get_dict(categories=cats))
eq_(sorted(self.get_webapp().categories), sorted(cats))
def test_categories_add(self):
self._step()
cat2 = 'games'
self._post_cats([self.cat1, cat2])
def test_categories_add_and_remove(self):
self._step()
cat2 = 'games'
self._post_cats([cat2])
def test_categories_remove(self):
# Add another category here so it gets added to the initial formset.
cat2 = 'games'
self.webapp.update(categories=[self.cat1, cat2])
self._step()
# `cat2` should get removed.
self._post_cats([self.cat1])
class TestDone(TestSubmit):
fixtures = fixture('user_999', 'webapp_337141')
def setUp(self):
super(TestDone, self).setUp()
self.webapp = self.get_webapp()
self.url = reverse('submit.app.done', args=[self.webapp.app_slug])
def get_webapp(self):
return Webapp.objects.get(id=337141)
def _step(self, **kw):
data = dict(addon=self.webapp, terms=True, manifest=True,
details=True)
data.update(kw)
self.cl = AppSubmissionChecklist.objects.create(**data)
AddonUser.objects.create(addon=self.webapp, user=self.user)
def test_anonymous(self):
self._test_anonymous()
def test_progress_display(self):
self._step()
self._test_progress_display(['terms', 'manifest', 'details'],
'next_steps')
def test_done(self):
self._step()
res = self.client.get(self.url)
eq_(res.status_code, 200)
class TestNextSteps(amo.tests.TestCase):
# TODO: Delete this test suite once we deploy IARC.
fixtures = fixture('user_999', 'webapp_337141')
def setUp(self):
self.user = UserProfile.objects.get(username='regularuser')
assert self.client.login(username=self.user.email, password='password')
self.webapp = Webapp.objects.get(id=337141)
self.webapp.update(status=amo.STATUS_PENDING)
self.url = reverse('submit.app.done', args=[self.webapp.app_slug])
def test_200(self, **kw):
data = dict(addon=self.webapp, terms=True, manifest=True,
details=True)
data.update(kw)
self.cl = AppSubmissionChecklist.objects.create(**data)
AddonUser.objects.create(addon=self.webapp, user=self.user)
res = self.client.get(self.url)
eq_(res.status_code, 200)
|
{
"content_hash": "725aa4553c59badd14be069358f49876",
"timestamp": "",
"source": "github",
"line_count": 1020,
"max_line_length": 79,
"avg_line_length": 37.772549019607844,
"alnum_prop": 0.5941393272425249,
"repo_name": "ngokevin/zamboni",
"id": "c2da056ab12e9195043e46d2ced910dfbdb76b0a",
"size": "38560",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mkt/submit/tests/test_views.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "356777"
},
{
"name": "JavaScript",
"bytes": "536388"
},
{
"name": "Python",
"bytes": "3883015"
},
{
"name": "Shell",
"bytes": "13597"
}
],
"symlink_target": ""
}
|
from django.db import models
from django.contrib.contenttypes.models import ContentType
from django.contrib.contenttypes.fields import GenericForeignKey
from django.contrib.auth.models import User
from datetime import datetime
from django.db.models import Q
from django.utils.translation import ugettext_lazy as _
from django.conf import settings
from django.utils.encoding import force_unicode
DEFAULT_MAX_COMMENT_LENGTH = getattr(settings, 'DEFAULT_MAX_COMMENT_LENGTH',
1000)
DEFAULT_MAX_COMMENT_DEPTH = getattr(settings, 'DEFAULT_MAX_COMMENT_DEPTH', 8)
MARKDOWN = 1
TEXTILE = 2
REST = 3
#HTML = 4
PLAINTEXT = 5
MARKUP_CHOICES = (
(MARKDOWN, _("Markdown")),
(PLAINTEXT, _("Plain text")),
)
DEFAULT_MARKUP = getattr(settings, 'DEFAULT_MARKUP', PLAINTEXT)
def dfs(node, all_nodes, depth):
"""
Performs a recursive depth-first search starting at ``node``. This
function also annotates each node with an attribute, ``depth``, an integer
representing how deeply nested that node is relative to the original object.
"""
node.depth = depth
to_return = [node, ]
for subnode in all_nodes:
if subnode.parent and subnode.parent.id == node.id:
to_return.extend(dfs(subnode, all_nodes, depth + 1))
return to_return
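# Tiny runnable illustration (not part of the original module) of the
# traversal above, using stand-in objects with the ``id``/``parent`` shape
# dfs() expects:
def _demo_dfs():
    class _Node(object):
        def __init__(self, node_id, parent=None):
            self.id, self.parent = node_id, parent
    root = _Node(1)
    child = _Node(2, parent=root)
    grandchild = _Node(3, parent=child)
    ordering = dfs(root, [child, grandchild], 0)
    assert [(n.id, n.depth) for n in ordering] == [(1, 0), (2, 1), (3, 2)]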
class ThreadedCommentManager(models.Manager):
"""
A ``Manager`` which will be attached to each comment model. It helps to
facilitate the retrieval of comments in tree form and also has utility
methods for creating and retrieving objects related to a specific content
object.
"""
def get_tree(self, content_object, root=None):
"""
Runs a depth-first search on all comments related to the given
content_object. This depth-first search adds a ``depth`` attribute to
the comment which signifies how deeply nested the comment is relative
to the original object.
If root is specified, it will start the tree from that comment's ID.
Ideally, one would use this ``depth`` attribute in the display of the
comment to offset that comment by some specified length.
The following is a (VERY) simple example of how the depth property
might be used in a template:
{% for comment in comment_tree %}
<p style="margin-left: {{ comment.depth }}em">
{{ comment.comment }}
</p>
{% endfor %}
"""
content_type = ContentType.objects.get_for_model(content_object)
children = list(self.get_queryset().filter(
content_type=content_type,
object_id=getattr(
content_object, 'pk', getattr(content_object, 'id')),
).select_related().order_by('date_submitted'))
to_return = []
if root:
if isinstance(root, int):
root_id = root
else:
root_id = root.id
to_return = [c for c in children if c.id == root_id]
if to_return:
to_return[0].depth = 0
for child in children:
if child.parent_id == root_id:
to_return.extend(dfs(child, children, 1))
else:
for child in children:
if not child.parent:
to_return.extend(dfs(child, children, 0))
return to_return
def _generate_object_kwarg_dict(self, content_object, **kwargs):
"""
Generates the most common keyword arguments for a given
``content_object``.
"""
kwargs['content_type'] = ContentType.objects.get_for_model(
content_object)
kwargs['object_id'] = getattr(
content_object, 'pk', getattr(content_object, 'id'))
return kwargs
def create_for_object(self, content_object, **kwargs):
"""
A simple wrapper around ``create`` for a given ``content_object``.
"""
return self.create(**self._generate_object_kwarg_dict(
content_object, **kwargs))
def get_or_create_for_object(self, content_object, **kwargs):
"""
A simple wrapper around ``get_or_create`` for a given
``content_object``.
"""
return self.get_or_create(**self._generate_object_kwarg_dict(
content_object, **kwargs))
def get_for_object(self, content_object, **kwargs):
"""
A simple wrapper around ``get`` for a given ``content_object``.
"""
return self.get(**self._generate_object_kwarg_dict(
content_object, **kwargs))
def all_for_object(self, content_object, **kwargs):
"""
Prepopulates a QuerySet with all comments related to the given
``content_object``.
"""
return self.filter(**self._generate_object_kwarg_dict(
content_object, **kwargs))
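# Illustrative manager usage (a sketch assuming a configured Django project
# and an existing ``post`` instance; not part of the original module):
def _demo_manager_usage(post, user):
    root = ThreadedComment.objects.create_for_object(
        post, user=user, comment='First!')
    ThreadedComment.objects.create_for_object(
        post, user=user, comment='A reply', parent=root)
    # get_tree() returns both comments depth-first, annotated with .depth.
    return ThreadedComment.objects.get_tree(post)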
class PublicThreadedCommentManager(ThreadedCommentManager):
"""
A ``Manager`` which borrows all of the same methods from
``ThreadedCommentManager``, but which also restricts the queryset to only
the published methods (in other words, ``is_public = True``).
"""
def get_queryset(self):
return super(PublicThreadedCommentManager, self).get_queryset().filter(
Q(is_public=True) | Q(is_approved=True)
)
class ThreadedComment(models.Model):
"""
A threaded comment which must be associated with an instance of
``django.contrib.auth.models.User``. It is given its hierarchy by a
nullable relationship back on itself named ``parent``.
This ``ThreadedComment`` supports several kinds of markup languages,
including Textile, Markdown, and ReST.
It also includes two Managers: ``objects``, which is the same as the normal
``objects`` Manager with a few added utility functions (see above), and
``public``, which has those same utility functions but limits the QuerySet
to only those values which are designated as public (``is_public=True``).
"""
# Generic Foreign Key Fields
content_type = models.ForeignKey(ContentType)
object_id = models.PositiveIntegerField(_('object ID'))
content_object = GenericForeignKey()
# Hierarchy Field
parent = models.ForeignKey(
'self', null=True, blank=True, default=None, related_name='children')
# User Field
user = models.ForeignKey(User)
# Date Fields
date_submitted = models.DateTimeField(
_('date/time submitted'), default=datetime.now)
date_modified = models.DateTimeField(
_('date/time modified'), default=datetime.now)
date_approved = models.DateTimeField(
_('date/time approved'), default=None, null=True, blank=True)
# Meat n' Potatoes
comment = models.TextField(verbose_name=_('Comment'))
markup = models.IntegerField(
choices=MARKUP_CHOICES, default=DEFAULT_MARKUP, blank=True)
# Status Fields
is_public = models.BooleanField(_('is public'), default=True)
is_approved = models.BooleanField(_('is approved'), default=False)
# Extra Field
ip_address = models.GenericIPAddressField(_('IP address'), null=True,
blank=True)
objects = ThreadedCommentManager()
public = PublicThreadedCommentManager()
def __unicode__(self):
if len(self.comment) > 50:
return self.comment[:50] + "..."
return self.comment[:50]
def save(self, **kwargs):
if not self.markup:
self.markup = DEFAULT_MARKUP
self.date_modified = datetime.now()
if not self.date_approved and self.is_approved:
self.date_approved = datetime.now()
super(ThreadedComment, self).save(**kwargs)
def get_content_object(self):
"""
Wrapper around the GenericForeignKey due to compatibility reasons
and due to ``list_display`` limitations.
"""
return self.content_object
def get_base_data(self, show_dates=True):
"""
Outputs a Python dictionary representing the most useful bits of
information about this particular object instance.
This is mostly useful for testing purposes, as the output from the
serializer changes from run to run. However, this may end up being
useful for JSON and/or XML data exchange going forward and as the
serializer system is changed.
"""
markup = "plaintext"
for markup_choice in MARKUP_CHOICES:
if self.markup == markup_choice[0]:
markup = markup_choice[1]
break
to_return = {
'content_object': self.content_object,
'parent': self.parent,
'user': self.user,
'comment': self.comment,
'is_public': self.is_public,
'is_approved': self.is_approved,
'ip_address': self.ip_address,
'markup': force_unicode(markup),
}
if show_dates:
to_return['date_submitted'] = self.date_submitted
to_return['date_modified'] = self.date_modified
to_return['date_approved'] = self.date_approved
return to_return
class Meta:
ordering = ('-date_submitted',)
verbose_name = _("Threaded Comment")
verbose_name_plural = _("Threaded Comments")
get_latest_by = "date_submitted"
class FreeThreadedComment(models.Model):
"""
A threaded comment which need not be associated with an instance of
``django.contrib.auth.models.User``. Instead, it requires minimally a
name, and maximally a name, website, and e-mail address. It is given its
hierarchy by a nullable relationship back on itself named ``parent``.
This ``FreeThreadedComment`` supports several kinds of markup languages,
including Textile, Markdown, and ReST.
It also includes two Managers: ``objects``, which is the same as the normal
``objects`` Manager with a few added utility functions (see above), and
``public``, which has those same utility functions but limits the QuerySet
to only those values which are designated as public (``is_public=True``).
"""
# Generic Foreign Key Fields
content_type = models.ForeignKey(ContentType)
object_id = models.PositiveIntegerField(_('object ID'))
content_object = GenericForeignKey()
# Hierarchy Field
parent = models.ForeignKey(
'self', null=True, blank=True, default=None, related_name='children')
# User-Replacement Fields
name = models.CharField(_('name'), max_length=128)
website = models.URLField(_('site'), blank=True)
email = models.EmailField(_('e-mail address'), blank=True)
# Date Fields
date_submitted = models.DateTimeField(
_('date/time submitted'), default=datetime.now)
date_modified = models.DateTimeField(
_('date/time modified'), default=datetime.now)
date_approved = models.DateTimeField(
_('date/time approved'), default=None, null=True, blank=True)
# Meat n' Potatoes
comment = models.TextField(_('comment'))
markup = models.IntegerField(
choices=MARKUP_CHOICES, default=DEFAULT_MARKUP, null=True, blank=True)
# Status Fields
is_public = models.BooleanField(_('is public'), default=True)
is_approved = models.BooleanField(_('is approved'), default=False)
# Extra Field
ip_address = models.GenericIPAddressField(_('IP address'), null=True,
blank=True)
objects = ThreadedCommentManager()
public = PublicThreadedCommentManager()
def __unicode__(self):
if len(self.comment) > 50:
return self.comment[:50] + "..."
return self.comment[:50]
def save(self, **kwargs):
if not self.markup:
self.markup = DEFAULT_MARKUP
self.date_modified = datetime.now()
if not self.date_approved and self.is_approved:
self.date_approved = datetime.now()
super(FreeThreadedComment, self).save(**kwargs)
def get_content_object(self, **kwargs):
"""
Wrapper around the GenericForeignKey due to compatibility reasons
and due to ``list_display`` limitations.
"""
return self.content_object
def get_base_data(self, show_dates=True):
"""
Outputs a Python dictionary representing the most useful bits of
information about this particular object instance.
This is mostly useful for testing purposes, as the output from the
serializer changes from run to run. However, this may end up being
useful for JSON and/or XML data exchange going forward and as the
serializer system is changed.
"""
markup = "plaintext"
for markup_choice in MARKUP_CHOICES:
if self.markup == markup_choice[0]:
markup = markup_choice[1]
break
to_return = {
'content_object': self.content_object,
'parent': self.parent,
'name': self.name,
'website': self.website,
'email': self.email,
'comment': self.comment,
'is_public': self.is_public,
'is_approved': self.is_approved,
'ip_address': self.ip_address,
'markup': force_unicode(markup),
}
if show_dates:
to_return['date_submitted'] = self.date_submitted
to_return['date_modified'] = self.date_modified
to_return['date_approved'] = self.date_approved
return to_return
class Meta:
ordering = ('-date_submitted',)
verbose_name = _("Free Threaded Comment")
verbose_name_plural = _("Free Threaded Comments")
get_latest_by = "date_submitted"
class TestModel(models.Model):
"""
This model is simply used by this application's test suite as a model to
which to attach comments.
"""
name = models.CharField(max_length=5)
is_public = models.BooleanField(default=True)
date = models.DateTimeField(default=datetime.now)
|
{
"content_hash": "6fde048c6f50c8b4ff49ed67966412ce",
"timestamp": "",
"source": "github",
"line_count": 383,
"max_line_length": 79,
"avg_line_length": 36.809399477806785,
"alnum_prop": 0.6254078592708185,
"repo_name": "fgirault/smeuhsocial",
"id": "97d5229d41009ab59fbea032eddc562d4e377cc9",
"size": "14098",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "apps/threadedcomments/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "72383"
},
{
"name": "HTML",
"bytes": "224460"
},
{
"name": "JavaScript",
"bytes": "18844"
},
{
"name": "Makefile",
"bytes": "3102"
},
{
"name": "Python",
"bytes": "690632"
}
],
"symlink_target": ""
}
|
import os
import sys
import re
from gensim import corpora, matutils
import MeCab
import zenhan
import pandas as pd
DATA_PATH = './text/stretch_data.txt'
DICTIONARY_FILE_NAME = './dict/stretch_dic.txt'
mecab = MeCab.Tagger('mecabrc')
"""
TODO: Receive pandas Series and generate dict
Now this script reads files.
"""
def tokenize(text):
'''
Morphological analysis: for now we just pull out the nouns.
Alphabetic tokens are extracted lowercased, hankaku (half-width),
and whitespace-trimmed.
'''
node = mecab.parseToNode(text)
while node:
if node.feature.split(',')[0] == '名詞':
try:
yield zenhan.z2h(node.surface.lower().strip())
except Exception:
yield '0'
node = node.next
def check_stopwords(word):
'''
Return True if the word is a stopword.
'''
if re.search('^[0-9]+$', word): # digits only
return True
if re.search('^[0-9a-zA-Z\u3041-\u3093\u30A1-\u30F6\u4E00-\u9FD0]+$', word): # White list [Alphabet, Kana, Kanji]
return False
return True
def get_words(contents):
'''
Run morphological analysis over a dict of documents and return the
token lists.
'''
ret = []
for k, content in contents.items():
ret.append(get_words_main(content))
return ret
def get_words_main(content):
'''
Run morphological analysis on a single document and return its tokens.
'''
return [token for token in tokenize(content) if not check_stopwords(token)]
def filter_dictionary(dictionary):
'''
Drop very low- and very high-frequency words.
'''
dictionary.filter_extremes(no_below=20, no_above=0.5)  # these thresholds may change later
return dictionary
def get_contents():
'''
Extract stretch data
'''
df = pd.read_csv(DATA_PATH)
return (df['KIBO_TANTOGYOMU_MEMO'] + df['KIBO_HOSPITALTYPE_MEMO'] + df['CONSCOMMENT'] + df['SHIGOTONAIYO']).to_dict()
def get_vector(dictionary, content):
'''
Analyze content and return a vector of feature using dictionary.
@param gensim_dict, str
@return vector
'''
tmp = dictionary.doc2bow(get_words_main(content))
dense = list(matutils.corpus2dense([tmp], num_terms=len(dictionary)).T[0])
return dense
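# A minimal end-to-end sketch (assumes MeCab and its dictionary are
# installed; the toy sentences below are placeholders, not project data):
def _demo_vectorize():
    toy_docs = {0: '犬が好きです。猫も好きです。', 1: '犬の散歩に行く。'}
    # Skip filter_dictionary() here: its thresholds would empty a toy corpus.
    dictionary = corpora.Dictionary(get_words(toy_docs))
    # get_vector() returns a dense bag-of-words feature, one slot per term.
    return get_vector(dictionary, '猫と犬')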
def get_dictionary(create_flg=False, file_name=DICTIONARY_FILE_NAME):
'''
Build the dictionary (create it from data or load it from disk).
'''
if create_flg or not os.path.exists(file_name):
# load the data
contents = get_contents()
# run morphological analysis and keep the nouns only
words = get_words(contents)
# build the dictionary, then apply the frequency filter
dictionary = filter_dictionary(corpora.Dictionary(words))
# save it for reuse
if file_name is None:
sys.exit()
dictionary.save_as_text(file_name)
else:
# normally we just load the dictionary from the file
dictionary = corpora.Dictionary.load_from_text(file_name)
return dictionary
if __name__ == '__main__':
get_dictionary(create_flg=True)
|
{
"content_hash": "8a49f8996b17e6b6fd60eb1bb774e2c9",
"timestamp": "",
"source": "github",
"line_count": 116,
"max_line_length": 121,
"avg_line_length": 23.189655172413794,
"alnum_prop": 0.6219330855018588,
"repo_name": "acro5piano/text-similarity",
"id": "57a8fb23ea3a32b34ff1114089f957aaf5ac88a8",
"size": "3042",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "corpus.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "3882"
}
],
"symlink_target": ""
}
|
"""
Given an array of integers, find out whether there are two distinct indices i and j in the array such that the absolute difference between nums[i] and nums[j] is at most t and the absolute difference between i and j is at most k.
Example 1:
Input: nums = [1,2,3,1], k = 3, t = 0
Output: true
Example 2:
Input: nums = [1,0,1,1], k = 1, t = 2
Output: true
Example 3:
Input: nums = [1,5,9,1,5,9], k = 2, t = 3
Output: false
"""
from typing import List
class Solution:
def containsNearbyAlmostDuplicate(self, nums: List[int], k: int, t: int) -> bool:
# Bucket sort. Each bucket has size of t. For each number, the possible
# candidate can only be in the same bucket or the two buckets besides.
# Keep as many as k buckets to ensure that the difference is at most k.
buckets = {}
for i, v in enumerate(nums):
# t == 0 is a special case where we only have to check the bucket
# that v is in.
bucketNum, offset = (v // t, 1) if t else (v, 0)
for idx in range(bucketNum - offset, bucketNum + offset + 1):
if idx in buckets and abs(buckets[idx] - nums[i]) <= t:
return True
buckets[bucketNum] = nums[i]
if len(buckets) > k:
# Remove the bucket which is too far away. Beware of zero t.
del buckets[nums[i - k] // t if t else nums[i - k]]
return False
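# Quick sanity check against the three examples in the docstring above
# (illustrative; the judge imports Solution directly):
if __name__ == '__main__':
    s = Solution()
    assert s.containsNearbyAlmostDuplicate([1, 2, 3, 1], 3, 0) is True
    assert s.containsNearbyAlmostDuplicate([1, 0, 1, 1], 1, 2) is True
    assert s.containsNearbyAlmostDuplicate([1, 5, 9, 1, 5, 9], 2, 3) is False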
|
{
"content_hash": "13247857130388bbda165913ac192efa",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 229,
"avg_line_length": 37.39473684210526,
"alnum_prop": 0.5932441942294159,
"repo_name": "franklingu/leetcode-solutions",
"id": "913fe73f05d9e8f64cc3e5af99fcd6b0501509e7",
"size": "1421",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "questions/contains-duplicate-iii/Solution.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "8919"
},
{
"name": "Java",
"bytes": "173033"
},
{
"name": "Python",
"bytes": "996874"
},
{
"name": "Shell",
"bytes": "2559"
}
],
"symlink_target": ""
}
|
from BonusAllocator import BonusAllocator
from IOHmmModel import IOHmmModel
import numpy as np
import mdptoolbox
class MLSAllocator(BonusAllocator):
def __init__(self, num_workers, len_seq=10, base_cost=5, bns=2, t=10, weights=None):
super(MLSAllocator, self).__init__(num_workers, base_cost, bns, t, weights)
print 'init an mls-mdp bonus allocator'
self.__len_seq = len_seq
self.__nstates = 0 # number of hidden states, denoted as S
self.__ostates = 0 # number of observations, denoted as O
self.__strt_prob = None # start probability of hidden states, shape = 1 * S (uniform placeholder)
self.__tmat0 = None # transition matrix with no bonus, shape = S * S, returned after training
self.__tmat1 = None # transition matrix with bonus, shape = S * S, returned after training
self.__emat = None # emission matrix, shape = S * O, returned after training
self.__numitr = 0
self.__policy = None # bonus policy
self.set_parameters()
def set_parameters(self, nstates=2, ostates=2, strt_prob=None, numitr=1000):
if strt_prob is None:
strt_prob = [1.0 / nstates for _ in range(nstates)]
self.__nstates = nstates # number of hidden states
self.__ostates = ostates # number of observations
self.__numitr = numitr # number of iterations in the EM algorithm
self.__strt_prob = strt_prob
def train(self, model):
model_param = model.get_model()
self.__tmat0 = model_param[0]
self.__tmat1 = model_param[1]
self.__emat = model_param[2]
p = np.array([self.__tmat0, self.__tmat1])
r = list()
r.append([sum([self.__tmat0[k][i] *
(self.__emat[i][0] * self.weights[0] + self.__emat[i][1] * self.weights[1])
for i in range(self.__nstates)]) for k in range(self.__nstates)])
r.append([sum([self.__tmat1[k][i] *
(self.__emat[i][0] * self.weights[0] + self.__emat[i][1] *
(self.weights[1] - self.weights[2]))
for i in range(self.__nstates)]) for k in range(self.__nstates)])
r = np.transpose(np.array(r))
def mdp_policy(horizon):
fh = mdptoolbox.mdp.FiniteHorizon(p, r, 0.9, horizon)
fh.run()
return list(fh.policy)
self.__policy = map(mdp_policy, range(1, self._t + 1))
def __viterbi(self, in_obs, ou_obs): # tmats[0] transition matrix when not bonus
t_val = list()
t_val.append([self.__strt_prob[i] * self.__emat[i][ou_obs[0]] for i in range(self.__nstates)]) # 1 * N
tmats = (self.__tmat0, self.__tmat1)
for cur_t in range(1, len(in_obs), 1):
t_val.append([])
for j in range(self.__nstates):
tmp_val = [t_val[cur_t - 1][i] * tmats[in_obs[cur_t]][i][j] * self.__emat[j][ou_obs[cur_t]]
for i in range(self.__nstates)]
t_val[cur_t].append(np.max(tmp_val))
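            # renormalize each step so the values don't underflow on long
            # sequences; the final argmax is unaffected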
t_val[cur_t] = [float(cur_v) / sum(t_val[cur_t]) for cur_v in t_val[cur_t]]
return np.argmax(t_val[-1])
def bonus_alloc(self, in_obs, ou_obs):
if self.__policy is not None and in_obs is not None and ou_obs is not None:
tc = len(in_obs) % self._t
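            # tc = steps already taken in the current round of length t; use the
            # policy computed for the remaining horizon, evaluated at the most
            # likely current hidden state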
return self._base_cost + self._bns * self.__policy[self._t - tc - 1][self.__viterbi(in_obs, ou_obs)][0]
else:
return self._base_cost + self._bns * np.random.choice(2, 1)[0]
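# Minimal usage sketch (assumes an IOHmmModel trained elsewhere in this repo,
# whose get_model() returns (tmat0, tmat1, emat)):
#   allocator = MLSAllocator(num_workers=10)
#   allocator.train(model)                        # model: a trained IOHmmModel
#   pay = allocator.bonus_alloc(in_obs, ou_obs)   # base cost plus optional bonus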
|
{
"content_hash": "b15534f5adf1795feabfa990d831c2a8",
"timestamp": "",
"source": "github",
"line_count": 78,
"max_line_length": 115,
"avg_line_length": 45.64102564102564,
"alnum_prop": 0.5584269662921348,
"repo_name": "guanhuamai/DPM",
"id": "2631089263570be0445dbe644fbe16eec3c63d39",
"size": "3560",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "PythonSource/BonusAllocatorLib/MLSAllocator.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "M",
"bytes": "279"
},
{
"name": "Matlab",
"bytes": "25133"
},
{
"name": "Python",
"bytes": "91530"
}
],
"symlink_target": ""
}
|
from django.shortcuts import render
# Create your views here.
### Source material taken from:
### https://mayukhsaha.wordpress.com/2013/05/09/simple-login-and-user-registration-application-using-django/
### March 2, 2015
### No explicit license
from login.forms import *
from django.contrib.auth.decorators import login_required
from django.contrib.auth import logout
from django.contrib.auth.models import User
from django.views.decorators.csrf import csrf_protect
from django.shortcuts import render_to_response
from django.http import HttpResponseRedirect
from django.template import RequestContext
from author.models import Author
from posts.models import Post
from posts.models import PRIVATE, FRIEND, FRIENDS, FOAF, PUBLIC, SERVERONLY
from posts.forms import *
from images.models import *
from posts.remote import reset_remote_posts
from author.remote import reset_remote_authors
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
import json
import requests
import unicodedata
from datetime import datetime
from dateutil import tz
@csrf_protect
def register(request):
#Create both a user and an author every time someone registers
if request.method == 'POST':
form=RegistrationForm(request.POST, request.FILES)
if form.is_valid():
if 'picture' in request.FILES:
picture=Image.objects.create(
image=request.FILES['picture'],
visibility=PUBLIC, #Profile pictures default visibility is PUBLIC
)
picture.save()
else:
picture = None
user = User.objects.create_user(
username=form.cleaned_data['username'],
password=form.cleaned_data['password1'],
email=form.cleaned_data['email'],
)
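            #New accounts start inactive; presumably an admin activates them before first login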
user.is_active=False
user.save()
author=Author.objects.create(
user=user,
host = 'http://cs410.cs.ualberta.ca:41084/api',
picture=picture,
)
return HttpResponseRedirect('/register/success/')
else:
form = RegistrationForm()
variables = RequestContext(request, {
'form': form,
})
return render_to_response(
'registration/register.html',
variables,
)
def register_success(request):
return render_to_response(
'registration/success.html',
)
def logout_page(request):
logout(request)
return HttpResponseRedirect('/')
@login_required
def home(request):
#Note: attributes passed in here are all lowercase regardless of capitalization
if request.user.is_superuser:
return HttpResponseRedirect("/accounts/login/")
    elif '/accounts/login/' in request.META.get('HTTP_REFERER', ''):
try:
reset_remote_authors()
reset_remote_posts()
except Exception, e:
print str(e)
posts = Post.objects.all()
author = request.user.author
friends = [f for f in author.get_friends()]
fof_dict = author.get_friends_of_friends()
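    #fof_dict presumably maps each friend to an iterable of that friend's friends,
    #which the FOAF visibility checks below rely on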
#Get everyone's public posts and get the posts from friends they received
public_posts = [p for p in posts if p.visibility == PUBLIC]
private_posts = [p for p in author.get_posts(visibility=PRIVATE)]
to_me_posts = [p for p in author.get_received_posts()]
friends_posts = []
posts_to_friends = [p for p in posts if p.visibility == FRIENDS]
for post in posts_to_friends:
if post.send_author in author.get_friends() or post.send_author == author:
friends_posts.append(post)
foaf_posts = []
for p in [p for p in posts if p.visibility == FOAF]:
foaf = any([(p.send_author in fof) for fof in fof_dict.values()])
if p.send_author in fof_dict.keys() or foaf:
foaf_posts.append(p)
#Get server posts
server_posts=[p for p in posts if p.visibility==SERVERONLY]
all_posts = set(public_posts + private_posts + to_me_posts +
friends_posts + foaf_posts+server_posts)
all_posts = sorted(all_posts, key=lambda x: x.published, reverse=True)
form= CommentForm()
personal_stream_toggle=True
paginator= Paginator(all_posts,8) #Show 8 posts per page
page= request.GET.get('page')
try:
posts=paginator.page(page)
except PageNotAnInteger: #If page isn't an integer deliver the first page
posts=paginator.page(1)
except EmptyPage:
posts=paginator.page(paginator.num_pages)
return render(request,
'home.html',
{
'user': request.user,
'author': request.user.author,
'posts': posts,
'form': form,
'personal_stream':personal_stream_toggle,
})
def authorhome(request, authorpage):
#Who is viewing their page?
viewer = request.user.author
author = Author.objects.get(uid=authorpage)
#Note: attributes passed in here are all lowercase regardless of capitalization
posts = set()
for post in Post.objects.all():
if (post.send_author == author):
posts.add(post)
#friends = [f for f in author.get_friends()]
friends = 0
for f in author.get_friends():
if (viewer == f):
friends = 1
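    #friends == 1 means the viewer is one of this author's friends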
#Get everyone's public posts and get the posts from friends they received
public_posts = [p for p in posts if p.visibility == PUBLIC]
if (viewer == author):
private_posts = [p for p in author.get_posts(visibility=PRIVATE)]
to_me_posts = [p for p in author.get_received_posts()]
else:
private_posts = list()
to_me_posts = list()
friends_posts = []
posts_to_friends = [p for p in posts if p.visibility == FRIENDS]
if (friends == 1) or (viewer==author):
friends_posts = posts_to_friends
foaf_posts = []
###TODO: FOAF *could* be implemented for an author's home page
### But currently I think it should just be posts made by that author.
all_posts = set(public_posts + friends_posts + private_posts + to_me_posts)
all_posts = sorted(all_posts, key=lambda x: x.published, reverse=True)
#Added pagination to the author page as well
paginator= Paginator(all_posts,8) #Show 8 posts per page
page= request.GET.get('page')
try:
posts=paginator.page(page)
except PageNotAnInteger:#If page isn't an integer deliver the first page
posts=paginator.page(1)
except EmptyPage:
posts=paginator.page(paginator.num_pages)
#Only if you are viewing your own page can you see your profile picture.
view_picture=False
if viewer==author:
if author.has_picture():
view_picture=True
return render(request,
'authorhome.html',
{
'user': author.user.username if author.user else author.displayname,
'email': author.user.email if author.user else None,
'author': request.user.author,
'posts': posts,
'view_picture':view_picture,
})
def personal_stream(request):
#Doesn't show all posts, just the posts an author is 'interested in'
#Assumed to be your github activity/stream,
#friends posts (both FRIENDS and PUBLIC visibilities), FOAF posts, private posts
#Also includes public/server posts of the people an author is following.
#Doesn't include public posts/server posts otherwise.
posts= Post.objects.all()
author = request.user.author
friends = [f for f in author.get_friends()]
fof_dict = author.get_friends_of_friends()
#Get private posts
private_posts = [p for p in author.get_posts(visibility=PRIVATE)]
#This part is rendered invalid by the coming change in post model
to_me_posts = [p for p in author.get_received_posts()]
#Get posts you made regardless of visibility.
by_me_posts= [p for p in author.get_posts()]
#Get posts made by friends with visibility=FRIENDS
friends_posts = []
posts_to_friends = [p for p in posts if p.visibility == FRIENDS]
for post in posts_to_friends:
if post.send_author in author.get_friends():
friends_posts.append(post)
#Get posts made by friends with visibility=PUBLIC
public_posts= [p for p in posts if p.visibility==PUBLIC]
public_by_friends=[]
for post in public_posts:
if post.send_author in author.get_friends():
public_by_friends.append(post)
#Get FOAF posts
foaf_posts = []
for p in [p for p in posts if p.visibility == FOAF]:
foaf = any([(p.send_author in fof) for fof in fof_dict.values()])
if p.send_author in fof_dict.keys() or foaf:
foaf_posts.append(p)
#Get posts made by you/friends with visibility=SERVER
all_server_posts=[p for p in posts if p.visibility==SERVERONLY]
server_posts=[]
for post in all_server_posts:
if post.send_author in author.get_friends() or post.send_author==author:
server_posts.append(post)
#Get the people that you are following and get their public/server posts.
#Presumably you shouldn't see their friends, private and foaf posts.
following_posts=[]
following= author.get_followees()
for person in following:
posts_public= person.get_posts(visibility=PUBLIC)
posts_server= person.get_posts(visibility=SERVERONLY)
for post in posts_public:
following_posts.append(post)
for post in posts_server:
following_posts.append(post)
all_posts = set(private_posts + friends_posts + foaf_posts + to_me_posts + by_me_posts + public_by_friends+server_posts+following_posts)
all_posts = sorted(all_posts, key=lambda x: x.published, reverse=True)
#Have a toggle/button to go back to the global stream
global_stream_toggle=True
paginator= Paginator(all_posts,8) #Show 8 posts per page
page= request.GET.get('page')
try:
posts=paginator.page(page)
except PageNotAnInteger:
#If page isn't an integer deliver the first page
posts=paginator.page(1)
except EmptyPage:
posts=paginator.page(paginator.num_pages)
form= CommentForm()
#Github stuff here
github_username=author.github
#If there is a github username, try and get their github activity.
if github_username!="":
activity=get_github_activity(github_username)
#If retrieving github activity was successful, pass it to the template
if activity !=None:
return render(request,
'home.html',
{
'user': request.user,
'author': request.user.author,
'posts': posts,
'form': form,
'global_stream':global_stream_toggle,
'github_activity':activity,
})
else:
return render(request,
'home.html',
{
'user': request.user,
'author': request.user.author,
'posts': posts,
'form': form,
'global_stream':global_stream_toggle,
})
#If there's no github username, don't bother.
return render(request,
'home.html',
{
'user': request.user,
'author': request.user.author,
'posts': posts,
'form': form,
'global_stream':global_stream_toggle,
})
def personal_stream_friends(request):
#Get friends posts (ones made by friends) only
#Has friends, public, FOAF and serveronly
posts= Post.objects.all()
author = request.user.author
friends = [f for f in author.get_friends()]
friends_posts = []
posts_to_friends = [p for p in posts if p.visibility == FRIENDS]
for post in posts_to_friends:
if post.send_author in author.get_friends():
friends_posts.append(post)
#Get posts made by friends with visibility=PUBLIC
public_posts= [p for p in posts if p.visibility==PUBLIC]
#print(public_posts)
public_by_friends=[]
for post in public_posts:
if post.send_author in author.get_friends():
public_by_friends.append(post)
#Get FOAF posts your friends made
fof_dict = author.get_friends_of_friends()
foaf_posts = []
for p in [p for p in posts if p.visibility == FOAF]:
foaf = any([(p.send_author in fof) for fof in fof_dict.values()])
if p.send_author in fof_dict.keys() or foaf:
#Only get FOAF posts that you didn't make and were made
#only by your friends
if p.send_author != author and p.send_author in author.get_friends():
foaf_posts.append(p)
#Get posts made by friends with visibility=SERVER
server_posts=[p for p in posts if p.visibility==SERVERONLY]
server_by_friends=[]
for post in server_posts:
if post.send_author in author.get_friends():
server_by_friends.append(post)
all_posts = set(friends_posts+public_by_friends+foaf_posts+server_by_friends)
all_posts = sorted(all_posts, key=lambda x: x.published, reverse=True)
#Have a toggle to go back to the global newsfeed
global_stream_toggle=True
paginator= Paginator(all_posts,8) #Show 8 posts per page
page= request.GET.get('page')
try:
posts=paginator.page(page)
except PageNotAnInteger:
#If page isn't an integer deliver the first page
posts=paginator.page(1)
except EmptyPage:
posts=paginator.page(paginator.num_pages)
form= CommentForm()
return render(request,
'home.html',
{
'user': request.user,
'author': request.user.author,
'posts': posts,
'form': form,
'global_stream':global_stream_toggle,
})
'''Used to convert github time to local time'''
def convert_git_time(github_time):
#Get the time at which you pushed to github, normalize and convert it to your local time
time=unicodedata.normalize("NFKD",github_time).encode("ascii","ignore")
new_time=time.split("T")
new_time= new_time[0]+" "+new_time[1][0:-1]
from_zone = tz.tzutc()
to_zone = tz.tzlocal()
utc = datetime.strptime(new_time, '%Y-%m-%d %H:%M:%S')
# Tell the datetime object that it's in UTC time zone
utc = utc.replace(tzinfo=from_zone)
# Convert time zone to current time zone
the_time = utc.astimezone(to_zone)
the_time= the_time.strftime("%B %d, %Y, %I:%M%p")
return the_time
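#Example (sketch; output depends on the local timezone, e.g. in MST):
#convert_git_time(u"2015-03-02T21:14:07Z") -> "March 02, 2015, 02:14PM"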
'''
There are 25 event types; supporting all of them would be too much, so we only cover the most used/relevant ones.
Supporting these 8 events because they seem like the most common ones and we are team 8:
IssueCommentEvent, PullRequestEvent, PushEvent, CreateEvent, DeleteEvent,
IssuesEvent, GollumEvent, PullRequestReviewCommentEvent.
'''
def get_github_activity(github_username):
#The URL for to get github activity from
url="https://api.github.com/users/"+github_username+"/events"
#Get all the events pertaining to a user's github URL
req= requests.get(url)
all_events= req.json()
status_code=req.status_code
#If status code isn't 200, some error happened. Return activity as None
if status_code!=200:
activity=None
return activity
#Put all github activity in this array
activity=[]
for event in all_events:
#The 'payload' of every event object
payload=event["payload"]
#The repository name of every event object
repo_name=event["repo"]["name"]
repo_name= unicodedata.normalize('NFKD', repo_name).encode('ascii','ignore')
#Covers push events in github activity. Won't put the various commit messages here because there could
#be up to 20 commits per push event as documented in the events API.
if event["type"] == "PushEvent":
#Get the branch you pushed to
branch= event["payload"]["ref"]
            #"ref" is seemingly always of the form "refs/heads/<branch_name>", so strip the 11-char "refs/heads/" prefix
branch=branch[11:]
branch=unicodedata.normalize('NFKD', branch).encode('ascii','ignore')
#Get the time at which you pushed to github, normalize and convert it to your local time
time=event["created_at"]
the_time=convert_git_time(time)
info= "("+ the_time +")"+ ": "+github_username +" pushed to "+branch+" at " + repo_name
activity.append(info)
        #Covers IssueCommentEvents. TODO: handle the case where pictures are embedded in the comment body
elif event["type"]=="IssueCommentEvent":
#Get the issue number
issue= payload["issue"]
issue_number= issue["number"]
#Get the contents of a comment aka the body
comment=payload["comment"]
body= comment["body"]
body= unicodedata.normalize('NFKD', body).encode('ascii','ignore')
#Get the time at which you pushed to github, normalize and convert it to local time
time=comment["updated_at"]
the_time=convert_git_time(time)
info= "("+the_time+"): "+ github_username+ " commented on issue "+str(issue_number)+ " for " +repo_name + " : " + body
            #Note to self: apply an HTML-to-Markdown conversion to the body later. Might just work
activity.append(info)
elif event["type"]== "PullRequestEvent":
#Get the pull request number
number=payload["number"]
#Get the action associated with the pull request
action=payload["action"]
action= unicodedata.normalize('NFKD', action).encode('ascii','ignore')
#Only handle the case where action is 'synchronize' just for grammar's sake
if action=="synchronize":
action="synchronized"
#Get title of the pull request
title=payload["pull_request"]["title"]
title= unicodedata.normalize('NFKD', title).encode('ascii','ignore')
#Get the potentially empty body of the pull request event
body=payload["pull_request"]["body"]
body= unicodedata.normalize('NFKD',body).encode('ascii','ignore')
#Get the time of the pull request, normalize and convert it to local timezone
time=payload["pull_request"]["updated_at"]
the_time=convert_git_time(time)
#Check if a comment exists for the pull request action- if not, just show the title of the pull request.
if body == "":
info= "("+ the_time +")"+ ": "+github_username +" "+action+" pull request " +str(number)+ " for "+repo_name+" : "+title
else:
info= "("+ the_time +")"+ ": "+github_username +" "+action+" pull request " +str(number)+ " for "+repo_name+" : "+title+" - "+body
activity.append(info)
elif event["type"]=="CreateEvent":
#Get ref_type because it holds what you created - a branch, repo, whatever.
ref_type= payload["ref_type"]
ref_type=unicodedata.normalize('NFKD',ref_type).encode('ascii','ignore')
#Get ref because it holds the name of what you created. Don't need to normalize this?
ref= payload["ref"]
#ref= unicodedata.normalize('NFKD',created).encode('ascii','ignore')
#Get the time, normalize it and convert it to local timezone
time= event["created_at"]
the_time=convert_git_time(time)
info="("+the_time+")" +": "+github_username+" created "+ref_type+" "+ref+" at "+repo_name
activity.append(info)
elif event["type"]=="PullRequestReviewCommentEvent":
#Get the pull request number
number=payload["pull_request"]["number"]
comment=payload["comment"]
#Body of a comment cannot be blank so don't need to handle that case
body=comment["body"]
body= unicodedata.normalize('NFKD',body).encode('ascii','ignore')
#Get the time, normalize it and convert it to local timezone
time= comment["updated_at"]
the_time=convert_git_time(time)
info="("+the_time+")" +": "+github_username+" commented on pull request #" +str(number) +" at " +repo_name+": "+body
activity.append(info)
elif event["type"]== "IssuesEvent":
#Get the issue number
issue= payload["issue"]
number=issue["number"]
#Get the action associated with the issue
action=payload["action"]
action=unicodedata.normalize('NFKD',action).encode('ascii','ignore')
#Get title of the issue
title=issue["title"]
title= unicodedata.normalize('NFKD', title).encode('ascii','ignore')
# Don't need body
# body=issue["body"]
# body= unicodedata.normalize('NFKD',body).encode('ascii','ignore')
time= issue['updated_at']
the_time=convert_git_time(time)
info= "("+ the_time + ")" + ": " +github_username+ " " + action +" issue #"+ str(number) + " at "+repo_name+ " : " + title
activity.append(info)
elif event["type"]=="DeleteEvent":
#Get the name of what was deleted
ref=payload['ref']
ref= unicodedata.normalize('NFKD',ref).encode('ascii','ignore')
#Get what was deleted - a branch, whatever.
ref_type=payload['ref_type']
ref_type=unicodedata.normalize('NFKD',ref_type).encode('ascii','ignore')
#Get the time, normalize and convert it to local timezone
time= event["created_at"]
the_time=convert_git_time(time)
info="("+the_time+")" + ": "+ github_username+" deleted " +ref_type+" "+ref+" at "+ repo_name
activity.append(info)
elif event["type"]== "GollumEvent":
#Get the page referred to in the pages object
pages=payload["pages"][0]
#Get the action associated with the event
action=pages["action"]
action= unicodedata.normalize('NFKD',action).encode('ascii','ignore')
#Get title of the wiki page
title=pages['title']
title= unicodedata.normalize('NFKD',title).encode('ascii','ignore')
#Get the time, normalize and convert to local timezone
time= event["created_at"]
the_time=convert_git_time(time)
info="("+the_time+")" +": "+github_username+ " "+action+ " page "+title+" of the " +repo_name +" wiki."
activity.append(info)
#If the event is not of the 8 types above, don't do anything for it. Just skip it
else:
continue
return activity
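#Usage sketch: returns None on any non-200 response, otherwise a list of strings like
#"(March 02, 2015, 02:14PM): octocat pushed to master at octocat/Hello-World"
#activity = get_github_activity("octocat")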
|
{
"content_hash": "5787523616e86a69397576227025dc1e",
"timestamp": "",
"source": "github",
"line_count": 614,
"max_line_length": 146,
"avg_line_length": 38.12214983713355,
"alnum_prop": 0.6109710770282394,
"repo_name": "CMPUT410W15/cmput410-project",
"id": "3317685586c9463bd5b09d8d9fe4d0e8697ca952",
"size": "23407",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "login/views.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "1165"
},
{
"name": "HTML",
"bytes": "44665"
},
{
"name": "JavaScript",
"bytes": "484"
},
{
"name": "Python",
"bytes": "127920"
}
],
"symlink_target": ""
}
|
"""The tests for the Script component."""
# pylint: disable=protected-access
import asyncio
from contextlib import contextmanager
from datetime import timedelta
from functools import reduce
import logging
import operator
from types import MappingProxyType
from unittest import mock
from unittest.mock import AsyncMock, patch
from async_timeout import timeout
import pytest
import voluptuous as vol
# Otherwise can't test just this file (import order issue)
from homeassistant import exceptions
import homeassistant.components.scene as scene
from homeassistant.const import (
ATTR_ENTITY_ID,
CONF_DEVICE_ID,
CONF_DOMAIN,
SERVICE_TURN_ON,
)
from homeassistant.core import SERVICE_CALL_LIMIT, Context, CoreState, callback
from homeassistant.exceptions import ConditionError, ServiceNotFound
from homeassistant.helpers import (
config_validation as cv,
entity_registry as er,
script,
template,
trace,
)
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from homeassistant.setup import async_setup_component
import homeassistant.util.dt as dt_util
from tests.common import (
async_capture_events,
async_fire_time_changed,
async_mock_service,
)
ENTITY_ID = "script.test"
@pytest.fixture(autouse=True)
def prepare_tracing():
"""Prepare tracing."""
trace.trace_get()
def compare_trigger_item(actual_trigger, expected_trigger):
"""Compare trigger data description."""
assert actual_trigger["description"] == expected_trigger["description"]
def compare_result_item(key, actual, expected, path):
"""Compare an item in the result dict.
Note: Unused variable 'path' is passed to get helpful errors from pytest.
"""
if key == "wait" and (expected.get("trigger") is not None):
assert "trigger" in actual
expected_trigger = expected.pop("trigger")
actual_trigger = actual.pop("trigger")
compare_trigger_item(actual_trigger, expected_trigger)
assert actual == expected
def assert_element(trace_element, expected_element, path):
"""Assert a trace element is as expected.
Note: Unused variable 'path' is passed to get helpful errors from pytest.
"""
expected_result = expected_element.get("result", {})
# Check that every item in expected_element is present and equal in trace_element
# The redundant set operation gives helpful errors from pytest
assert not set(expected_result) - set(trace_element._result or {})
for result_key, result in expected_result.items():
compare_result_item(result_key, trace_element._result[result_key], result, path)
assert trace_element._result[result_key] == result
# Check for unexpected items in trace_element
assert not set(trace_element._result or {}) - set(expected_result)
if "error_type" in expected_element:
assert isinstance(trace_element._error, expected_element["error_type"])
else:
assert trace_element._error is None
# Don't check variables when script starts
if trace_element.path == "0":
return
if "variables" in expected_element:
assert expected_element["variables"] == trace_element._variables
else:
assert not trace_element._variables
def assert_action_trace(expected, expected_script_execution="finished"):
"""Assert a trace condition sequence is as expected."""
action_trace = trace.trace_get(clear=False)
script_execution = trace.script_execution_get()
trace.trace_clear()
expected_trace_keys = list(expected.keys())
assert list(action_trace.keys()) == expected_trace_keys
for trace_key_index, key in enumerate(expected_trace_keys):
assert len(action_trace[key]) == len(expected[key])
for index, element in enumerate(expected[key]):
path = f"[{trace_key_index}][{index}]"
assert_element(action_trace[key][index], element, path)
assert script_execution == expected_script_execution
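# Example of the expected shape (sketch):
#   assert_action_trace({"0": [{"result": {"event": "test_event", "event_data": {}}}]})
# Keys are script step paths; each list holds one entry per execution of that step.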
def async_watch_for_action(script_obj, message):
"""Watch for message in last_action."""
flag = asyncio.Event()
@callback
def check_action():
if script_obj.last_action and message in script_obj.last_action:
flag.set()
script_obj.change_listener = check_action
assert script_obj.change_listener is check_action
return flag
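# Typical usage (sketch): flag = async_watch_for_action(script_obj, "delay"),
# then await asyncio.wait_for(flag.wait(), 1) to block until the step starts.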
async def test_firing_event_basic(hass, caplog):
"""Test the firing of events."""
event = "test_event"
context = Context()
events = async_capture_events(hass, event)
alias = "event step"
sequence = cv.SCRIPT_SCHEMA(
{"alias": alias, "event": event, "event_data": {"hello": "world"}}
)
script_obj = script.Script(
hass,
sequence,
"Test Name",
"test_domain",
running_description="test script",
)
await script_obj.async_run(context=context)
await hass.async_block_till_done()
assert len(events) == 1
assert events[0].context is context
assert events[0].data.get("hello") == "world"
assert ".test_name:" in caplog.text
assert "Test Name: Running test script" in caplog.text
assert f"Executing step {alias}" in caplog.text
assert_action_trace(
{
"0": [
{"result": {"event": "test_event", "event_data": {"hello": "world"}}},
],
}
)
async def test_firing_event_template(hass):
"""Test the firing of events."""
event = "test_event"
context = Context()
events = async_capture_events(hass, event)
sequence = cv.SCRIPT_SCHEMA(
{
"event": event,
"event_data": {
"dict": {
1: "{{ is_world }}",
2: "{{ is_world }}{{ is_world }}",
3: "{{ is_world }}{{ is_world }}{{ is_world }}",
},
"list": ["{{ is_world }}", "{{ is_world }}{{ is_world }}"],
},
"event_data_template": {
"dict2": {
1: "{{ is_world }}",
2: "{{ is_world }}{{ is_world }}",
3: "{{ is_world }}{{ is_world }}{{ is_world }}",
},
"list2": ["{{ is_world }}", "{{ is_world }}{{ is_world }}"],
},
}
)
script_obj = script.Script(hass, sequence, "Test Name", "test_domain")
await script_obj.async_run(MappingProxyType({"is_world": "yes"}), context=context)
await hass.async_block_till_done()
assert len(events) == 1
assert events[0].context is context
assert events[0].data == {
"dict": {1: "yes", 2: "yesyes", 3: "yesyesyes"},
"list": ["yes", "yesyes"],
"dict2": {1: "yes", 2: "yesyes", 3: "yesyesyes"},
"list2": ["yes", "yesyes"],
}
assert_action_trace(
{
"0": [
{
"result": {
"event": "test_event",
"event_data": {
"dict": {1: "yes", 2: "yesyes", 3: "yesyesyes"},
"dict2": {1: "yes", 2: "yesyes", 3: "yesyesyes"},
"list": ["yes", "yesyes"],
"list2": ["yes", "yesyes"],
},
}
}
],
}
)
async def test_calling_service_basic(hass, caplog):
"""Test the calling of a service."""
context = Context()
calls = async_mock_service(hass, "test", "script")
alias = "service step"
sequence = cv.SCRIPT_SCHEMA(
{"alias": alias, "service": "test.script", "data": {"hello": "world"}}
)
script_obj = script.Script(hass, sequence, "Test Name", "test_domain")
await script_obj.async_run(context=context)
await hass.async_block_till_done()
assert len(calls) == 1
assert calls[0].context is context
assert calls[0].data.get("hello") == "world"
assert f"Executing step {alias}" in caplog.text
assert_action_trace(
{
"0": [
{
"result": {
"limit": SERVICE_CALL_LIMIT,
"params": {
"domain": "test",
"service": "script",
"service_data": {"hello": "world"},
"target": {},
},
"running_script": False,
}
}
],
}
)
async def test_calling_service_template(hass):
"""Test the calling of a service."""
context = Context()
calls = async_mock_service(hass, "test", "script")
sequence = cv.SCRIPT_SCHEMA(
{
"service_template": """
{% if True %}
test.script
{% else %}
test.not_script
{% endif %}""",
"data_template": {
"hello": """
{% if is_world == 'yes' %}
world
{% else %}
not world
{% endif %}
"""
},
}
)
script_obj = script.Script(hass, sequence, "Test Name", "test_domain")
await script_obj.async_run(MappingProxyType({"is_world": "yes"}), context=context)
await hass.async_block_till_done()
assert len(calls) == 1
assert calls[0].context is context
assert calls[0].data.get("hello") == "world"
assert_action_trace(
{
"0": [
{
"result": {
"limit": SERVICE_CALL_LIMIT,
"params": {
"domain": "test",
"service": "script",
"service_data": {"hello": "world"},
"target": {},
},
"running_script": False,
}
}
],
}
)
async def test_data_template_with_templated_key(hass):
"""Test the calling of a service with a data_template with a templated key."""
context = Context()
calls = async_mock_service(hass, "test", "script")
sequence = cv.SCRIPT_SCHEMA(
{"service": "test.script", "data_template": {"{{ hello_var }}": "world"}}
)
script_obj = script.Script(hass, sequence, "Test Name", "test_domain")
await script_obj.async_run(
MappingProxyType({"hello_var": "hello"}), context=context
)
await hass.async_block_till_done()
assert len(calls) == 1
assert calls[0].context is context
assert calls[0].data.get("hello") == "world"
assert_action_trace(
{
"0": [
{
"result": {
"limit": SERVICE_CALL_LIMIT,
"params": {
"domain": "test",
"service": "script",
"service_data": {"hello": "world"},
"target": {},
},
"running_script": False,
}
}
],
}
)
async def test_multiple_runs_no_wait(hass):
"""Test multiple runs with no wait in script."""
logger = logging.getLogger("TEST")
calls = []
heard_event = asyncio.Event()
async def async_simulate_long_service(service):
"""Simulate a service that takes a not insignificant time."""
fire = service.data.get("fire")
listen = service.data.get("listen")
service_done = asyncio.Event()
@callback
def service_done_cb(event):
logger.debug("simulated service (%s:%s) done", fire, listen)
service_done.set()
calls.append(service)
logger.debug("simulated service (%s:%s) started", fire, listen)
unsub = hass.bus.async_listen(str(listen), service_done_cb)
hass.bus.async_fire(str(fire))
await service_done.wait()
unsub()
hass.services.async_register("test", "script", async_simulate_long_service)
@callback
def heard_event_cb(event):
logger.debug("heard: %s", event)
heard_event.set()
sequence = cv.SCRIPT_SCHEMA(
[
{
"service": "test.script",
"data_template": {"fire": "{{ fire1 }}", "listen": "{{ listen1 }}"},
},
{
"service": "test.script",
"data_template": {"fire": "{{ fire2 }}", "listen": "{{ listen2 }}"},
},
]
)
script_obj = script.Script(
hass, sequence, "Test Name", "test_domain", script_mode="parallel", max_runs=2
)
# Start script twice in such a way that second run will be started while first run
# is in the middle of the first service call.
unsub = hass.bus.async_listen("1", heard_event_cb)
logger.debug("starting 1st script")
hass.async_create_task(
script_obj.async_run(
MappingProxyType(
{"fire1": "1", "listen1": "2", "fire2": "3", "listen2": "4"}
),
Context(),
)
)
await asyncio.wait_for(heard_event.wait(), 1)
unsub()
logger.debug("starting 2nd script")
await script_obj.async_run(
MappingProxyType({"fire1": "2", "listen1": "3", "fire2": "4", "listen2": "4"}),
Context(),
)
await hass.async_block_till_done()
assert len(calls) == 4
async def test_activating_scene(hass, caplog):
"""Test the activation of a scene."""
context = Context()
calls = async_mock_service(hass, scene.DOMAIN, SERVICE_TURN_ON)
alias = "scene step"
sequence = cv.SCRIPT_SCHEMA({"alias": alias, "scene": "scene.hello"})
script_obj = script.Script(hass, sequence, "Test Name", "test_domain")
await script_obj.async_run(context=context)
await hass.async_block_till_done()
assert len(calls) == 1
assert calls[0].context is context
assert calls[0].data.get(ATTR_ENTITY_ID) == "scene.hello"
assert f"Executing step {alias}" in caplog.text
assert_action_trace(
{
"0": [{"result": {"scene": "scene.hello"}}],
}
)
@pytest.mark.parametrize("count", [1, 3])
async def test_stop_no_wait(hass, count):
"""Test stopping script."""
service_started_sem = asyncio.Semaphore(0)
finish_service_event = asyncio.Event()
event = "test_event"
events = async_capture_events(hass, event)
async def async_simulate_long_service(service):
"""Simulate a service that takes a not insignificant time."""
service_started_sem.release()
await finish_service_event.wait()
hass.services.async_register("test", "script", async_simulate_long_service)
sequence = cv.SCRIPT_SCHEMA([{"service": "test.script"}, {"event": event}])
script_obj = script.Script(
hass,
sequence,
"Test Name",
"test_domain",
script_mode="parallel",
max_runs=count,
)
# Get script started specified number of times and wait until the test.script
# service has started for each run.
tasks = []
for _ in range(count):
hass.async_create_task(script_obj.async_run(context=Context()))
tasks.append(hass.async_create_task(service_started_sem.acquire()))
await asyncio.wait_for(asyncio.gather(*tasks), 1)
# Can't assert just yet because we haven't verified stopping works yet.
# If assert fails we can hang test if async_stop doesn't work.
script_was_running = script_obj.is_running
were_no_events = len(events) == 0
# Begin the process of stopping the script (which should stop all runs), and then
# let the service calls complete.
hass.async_create_task(script_obj.async_stop())
finish_service_event.set()
await hass.async_block_till_done()
assert script_was_running
assert were_no_events
assert not script_obj.is_running
assert len(events) == 0
async def test_delay_basic(hass):
"""Test the delay."""
delay_alias = "delay step"
sequence = cv.SCRIPT_SCHEMA({"delay": {"seconds": 5}, "alias": delay_alias})
script_obj = script.Script(hass, sequence, "Test Name", "test_domain")
delay_started_flag = async_watch_for_action(script_obj, delay_alias)
try:
hass.async_create_task(script_obj.async_run(context=Context()))
await asyncio.wait_for(delay_started_flag.wait(), 1)
assert script_obj.is_running
assert script_obj.last_action == delay_alias
except (AssertionError, asyncio.TimeoutError):
await script_obj.async_stop()
raise
else:
async_fire_time_changed(hass, dt_util.utcnow() + timedelta(seconds=5))
await hass.async_block_till_done()
assert not script_obj.is_running
assert script_obj.last_action is None
assert_action_trace(
{
"0": [{"result": {"delay": 5.0, "done": True}}],
}
)
async def test_multiple_runs_delay(hass):
"""Test multiple runs with delay in script."""
event = "test_event"
events = async_capture_events(hass, event)
delay = timedelta(seconds=5)
sequence = cv.SCRIPT_SCHEMA(
[
{"event": event, "event_data": {"value": 1}},
{"delay": delay},
{"event": event, "event_data": {"value": 2}},
]
)
script_obj = script.Script(
hass, sequence, "Test Name", "test_domain", script_mode="parallel", max_runs=2
)
delay_started_flag = async_watch_for_action(script_obj, "delay")
try:
hass.async_create_task(script_obj.async_run(context=Context()))
await asyncio.wait_for(delay_started_flag.wait(), 1)
assert script_obj.is_running
assert len(events) == 1
assert events[-1].data["value"] == 1
except (AssertionError, asyncio.TimeoutError):
await script_obj.async_stop()
raise
else:
# Start second run of script while first run is in a delay.
script_obj.sequence[1]["alias"] = "delay run 2"
delay_started_flag = async_watch_for_action(script_obj, "delay run 2")
hass.async_create_task(script_obj.async_run(context=Context()))
await asyncio.wait_for(delay_started_flag.wait(), 1)
async_fire_time_changed(hass, dt_util.utcnow() + delay)
await hass.async_block_till_done()
assert not script_obj.is_running
assert len(events) == 4
assert events[-3].data["value"] == 1
assert events[-2].data["value"] == 2
assert events[-1].data["value"] == 2
async def test_delay_template_ok(hass):
"""Test the delay as a template."""
sequence = cv.SCRIPT_SCHEMA({"delay": "00:00:{{ 5 }}"})
script_obj = script.Script(hass, sequence, "Test Name", "test_domain")
delay_started_flag = async_watch_for_action(script_obj, "delay")
try:
hass.async_create_task(script_obj.async_run(context=Context()))
await asyncio.wait_for(delay_started_flag.wait(), 1)
assert script_obj.is_running
except (AssertionError, asyncio.TimeoutError):
await script_obj.async_stop()
raise
else:
async_fire_time_changed(hass, dt_util.utcnow() + timedelta(seconds=5))
await hass.async_block_till_done()
assert not script_obj.is_running
assert_action_trace(
{
"0": [{"result": {"delay": 5.0, "done": True}}],
}
)
async def test_delay_template_invalid(hass, caplog):
"""Test the delay as a template that fails."""
event = "test_event"
events = async_capture_events(hass, event)
sequence = cv.SCRIPT_SCHEMA(
[
{"event": event},
{"delay": "{{ invalid_delay }}"},
{"delay": {"seconds": 5}},
{"event": event},
]
)
script_obj = script.Script(hass, sequence, "Test Name", "test_domain")
start_idx = len(caplog.records)
await script_obj.async_run(context=Context())
await hass.async_block_till_done()
assert any(
rec.levelname == "ERROR" and "Error rendering" in rec.message
for rec in caplog.records[start_idx:]
)
assert not script_obj.is_running
assert len(events) == 1
assert_action_trace(
{
"0": [{"result": {"event": "test_event", "event_data": {}}}],
"1": [{"error_type": vol.MultipleInvalid}],
},
expected_script_execution="aborted",
)
async def test_delay_template_complex_ok(hass):
"""Test the delay with a working complex template."""
sequence = cv.SCRIPT_SCHEMA({"delay": {"seconds": "{{ 5 }}"}})
script_obj = script.Script(hass, sequence, "Test Name", "test_domain")
delay_started_flag = async_watch_for_action(script_obj, "delay")
try:
hass.async_create_task(script_obj.async_run(context=Context()))
await asyncio.wait_for(delay_started_flag.wait(), 1)
assert script_obj.is_running
except (AssertionError, asyncio.TimeoutError):
await script_obj.async_stop()
raise
else:
async_fire_time_changed(hass, dt_util.utcnow() + timedelta(seconds=5))
await hass.async_block_till_done()
assert not script_obj.is_running
assert_action_trace(
{
"0": [{"result": {"delay": 5.0, "done": True}}],
}
)
async def test_delay_template_complex_invalid(hass, caplog):
"""Test the delay with a complex template that fails."""
event = "test_event"
events = async_capture_events(hass, event)
sequence = cv.SCRIPT_SCHEMA(
[
{"event": event},
{"delay": {"seconds": "{{ invalid_delay }}"}},
{"delay": {"seconds": 5}},
{"event": event},
]
)
script_obj = script.Script(hass, sequence, "Test Name", "test_domain")
start_idx = len(caplog.records)
await script_obj.async_run(context=Context())
await hass.async_block_till_done()
assert any(
rec.levelname == "ERROR" and "Error rendering" in rec.message
for rec in caplog.records[start_idx:]
)
assert not script_obj.is_running
assert len(events) == 1
assert_action_trace(
{
"0": [{"result": {"event": "test_event", "event_data": {}}}],
"1": [{"error_type": vol.MultipleInvalid}],
},
expected_script_execution="aborted",
)
async def test_cancel_delay(hass):
"""Test the cancelling while the delay is present."""
event = "test_event"
events = async_capture_events(hass, event)
sequence = cv.SCRIPT_SCHEMA([{"delay": {"seconds": 5}}, {"event": event}])
script_obj = script.Script(hass, sequence, "Test Name", "test_domain")
delay_started_flag = async_watch_for_action(script_obj, "delay")
try:
hass.async_create_task(script_obj.async_run(context=Context()))
await asyncio.wait_for(delay_started_flag.wait(), 1)
assert script_obj.is_running
assert len(events) == 0
except (AssertionError, asyncio.TimeoutError):
await script_obj.async_stop()
raise
else:
await script_obj.async_stop()
assert not script_obj.is_running
# Make sure the script is really stopped.
async_fire_time_changed(hass, dt_util.utcnow() + timedelta(seconds=5))
await hass.async_block_till_done()
assert not script_obj.is_running
assert len(events) == 0
assert_action_trace(
{
"0": [{"result": {"delay": 5.0, "done": False}}],
},
expected_script_execution="cancelled",
)
@pytest.mark.parametrize("action_type", ["template", "trigger"])
async def test_wait_basic(hass, action_type):
"""Test wait actions."""
wait_alias = "wait step"
action = {"alias": wait_alias}
if action_type == "template":
action["wait_template"] = "{{ states.switch.test.state == 'off' }}"
else:
action["wait_for_trigger"] = {
"platform": "state",
"entity_id": "switch.test",
"to": "off",
}
sequence = cv.SCRIPT_SCHEMA(action)
sequence = await script.async_validate_actions_config(hass, sequence)
script_obj = script.Script(hass, sequence, "Test Name", "test_domain")
wait_started_flag = async_watch_for_action(script_obj, wait_alias)
try:
hass.states.async_set("switch.test", "on")
hass.async_create_task(script_obj.async_run(context=Context()))
await asyncio.wait_for(wait_started_flag.wait(), 1)
assert script_obj.is_running
assert script_obj.last_action == wait_alias
except (AssertionError, asyncio.TimeoutError):
await script_obj.async_stop()
raise
else:
hass.states.async_set("switch.test", "off")
await hass.async_block_till_done()
assert not script_obj.is_running
assert script_obj.last_action is None
if action_type == "template":
assert_action_trace(
{
"0": [{"result": {"wait": {"completed": True, "remaining": None}}}],
}
)
else:
assert_action_trace(
{
"0": [
{
"result": {
"wait": {
"trigger": {"description": "state of switch.test"},
"remaining": None,
}
}
}
],
}
)
async def test_wait_for_trigger_variables(hass):
"""Test variables are passed to wait_for_trigger action."""
context = Context()
wait_alias = "wait step"
actions = [
{
"alias": "variables",
"variables": {"seconds": 5},
},
{
"alias": wait_alias,
"wait_for_trigger": {
"platform": "state",
"entity_id": "switch.test",
"to": "off",
"for": {"seconds": "{{ seconds }}"},
},
},
]
sequence = cv.SCRIPT_SCHEMA(actions)
sequence = await script.async_validate_actions_config(hass, sequence)
script_obj = script.Script(hass, sequence, "Test Name", "test_domain")
wait_started_flag = async_watch_for_action(script_obj, wait_alias)
try:
hass.states.async_set("switch.test", "on")
hass.async_create_task(script_obj.async_run(context=context))
await asyncio.wait_for(wait_started_flag.wait(), 1)
assert script_obj.is_running
assert script_obj.last_action == wait_alias
hass.states.async_set("switch.test", "off")
# the script task + 2 tasks created by wait_for_trigger script step
await hass.async_wait_for_task_count(3)
async_fire_time_changed(hass, dt_util.utcnow() + timedelta(seconds=10))
await hass.async_block_till_done()
except (AssertionError, asyncio.TimeoutError):
await script_obj.async_stop()
raise
else:
assert not script_obj.is_running
assert script_obj.last_action is None
@pytest.mark.parametrize("action_type", ["template", "trigger"])
async def test_wait_basic_times_out(hass, action_type):
"""Test wait actions times out when the action does not happen."""
wait_alias = "wait step"
action = {"alias": wait_alias}
if action_type == "template":
action["wait_template"] = "{{ states.switch.test.state == 'off' }}"
else:
action["wait_for_trigger"] = {
"platform": "state",
"entity_id": "switch.test",
"to": "off",
}
sequence = cv.SCRIPT_SCHEMA(action)
sequence = await script.async_validate_actions_config(hass, sequence)
script_obj = script.Script(hass, sequence, "Test Name", "test_domain")
wait_started_flag = async_watch_for_action(script_obj, wait_alias)
timed_out = False
try:
hass.states.async_set("switch.test", "on")
hass.async_create_task(script_obj.async_run(context=Context()))
await asyncio.wait_for(wait_started_flag.wait(), 1)
assert script_obj.is_running
assert script_obj.last_action == wait_alias
hass.states.async_set("switch.test", "not_on")
async with timeout(0.1):
await hass.async_block_till_done()
except asyncio.TimeoutError:
timed_out = True
await script_obj.async_stop()
assert timed_out
if action_type == "template":
assert_action_trace(
{
"0": [{"result": {"wait": {"completed": False, "remaining": None}}}],
}
)
else:
assert_action_trace(
{
"0": [{"result": {"wait": {"trigger": None, "remaining": None}}}],
}
)
@pytest.mark.parametrize("action_type", ["template", "trigger"])
async def test_multiple_runs_wait(hass, action_type):
"""Test multiple runs with wait in script."""
event = "test_event"
events = async_capture_events(hass, event)
if action_type == "template":
action = {"wait_template": "{{ states.switch.test.state == 'off' }}"}
else:
action = {
"wait_for_trigger": {
"platform": "state",
"entity_id": "switch.test",
"to": "off",
}
}
sequence = cv.SCRIPT_SCHEMA(
[
{"event": event, "event_data": {"value": 1}},
action,
{"event": event, "event_data": {"value": 2}},
]
)
sequence = await script.async_validate_actions_config(hass, sequence)
script_obj = script.Script(
hass, sequence, "Test Name", "test_domain", script_mode="parallel", max_runs=2
)
wait_started_flag = async_watch_for_action(script_obj, "wait")
try:
hass.states.async_set("switch.test", "on")
hass.async_create_task(script_obj.async_run(context=Context()))
await asyncio.wait_for(wait_started_flag.wait(), 1)
assert script_obj.is_running
assert len(events) == 1
assert events[-1].data["value"] == 1
# Start second run of script while first run is in wait_template.
wait_started_flag.clear()
hass.async_create_task(script_obj.async_run())
await asyncio.wait_for(wait_started_flag.wait(), 1)
except (AssertionError, asyncio.TimeoutError):
await script_obj.async_stop()
raise
else:
hass.states.async_set("switch.test", "off")
await hass.async_block_till_done()
assert not script_obj.is_running
assert len(events) == 4
assert events[-3].data["value"] == 1
assert events[-2].data["value"] == 2
assert events[-1].data["value"] == 2
@pytest.mark.parametrize("action_type", ["template", "trigger"])
async def test_cancel_wait(hass, action_type):
"""Test the cancelling while wait is present."""
event = "test_event"
events = async_capture_events(hass, event)
if action_type == "template":
action = {"wait_template": "{{ states.switch.test.state == 'off' }}"}
else:
action = {
"wait_for_trigger": {
"platform": "state",
"entity_id": "switch.test",
"to": "off",
}
}
sequence = cv.SCRIPT_SCHEMA([action, {"event": event}])
sequence = await script.async_validate_actions_config(hass, sequence)
script_obj = script.Script(hass, sequence, "Test Name", "test_domain")
wait_started_flag = async_watch_for_action(script_obj, "wait")
try:
hass.states.async_set("switch.test", "on")
hass.async_create_task(script_obj.async_run(context=Context()))
await asyncio.wait_for(wait_started_flag.wait(), 1)
assert script_obj.is_running
assert len(events) == 0
except (AssertionError, asyncio.TimeoutError):
await script_obj.async_stop()
raise
else:
await script_obj.async_stop()
assert not script_obj.is_running
# Make sure the script is really stopped.
hass.states.async_set("switch.test", "off")
await hass.async_block_till_done()
assert not script_obj.is_running
assert len(events) == 0
if action_type == "template":
assert_action_trace(
{
"0": [{"result": {"wait": {"completed": False, "remaining": None}}}],
},
expected_script_execution="cancelled",
)
else:
assert_action_trace(
{
"0": [{"result": {"wait": {"trigger": None, "remaining": None}}}],
},
expected_script_execution="cancelled",
)
async def test_wait_template_not_schedule(hass):
"""Test the wait template with correct condition."""
event = "test_event"
events = async_capture_events(hass, event)
sequence = cv.SCRIPT_SCHEMA(
[
{"event": event},
{"wait_template": "{{ states.switch.test.state == 'on' }}"},
{"event": event},
]
)
script_obj = script.Script(hass, sequence, "Test Name", "test_domain")
hass.states.async_set("switch.test", "on")
await script_obj.async_run(context=Context())
await hass.async_block_till_done()
assert not script_obj.is_running
assert len(events) == 2
assert_action_trace(
{
"0": [{"result": {"event": "test_event", "event_data": {}}}],
"1": [{"result": {"wait": {"completed": True, "remaining": None}}}],
"2": [
{
"result": {"event": "test_event", "event_data": {}},
"variables": {"wait": {"completed": True, "remaining": None}},
}
],
}
)
@pytest.mark.parametrize(
"timeout_param", [5, "{{ 5 }}", {"seconds": 5}, {"seconds": "{{ 5 }}"}]
)
@pytest.mark.parametrize("action_type", ["template", "trigger"])
async def test_wait_timeout(hass, caplog, timeout_param, action_type):
"""Test the wait timeout option."""
event = "test_event"
events = async_capture_events(hass, event)
if action_type == "template":
action = {"wait_template": "{{ states.switch.test.state == 'off' }}"}
else:
action = {
"wait_for_trigger": {
"platform": "state",
"entity_id": "switch.test",
"to": "off",
}
}
action["timeout"] = timeout_param
action["continue_on_timeout"] = True
sequence = cv.SCRIPT_SCHEMA([action, {"event": event}])
sequence = await script.async_validate_actions_config(hass, sequence)
script_obj = script.Script(hass, sequence, "Test Name", "test_domain")
wait_started_flag = async_watch_for_action(script_obj, "wait")
try:
hass.states.async_set("switch.test", "on")
hass.async_create_task(script_obj.async_run(context=Context()))
await asyncio.wait_for(wait_started_flag.wait(), 1)
assert script_obj.is_running
assert len(events) == 0
except (AssertionError, asyncio.TimeoutError):
await script_obj.async_stop()
raise
else:
cur_time = dt_util.utcnow()
async_fire_time_changed(hass, cur_time + timedelta(seconds=4))
await asyncio.sleep(0)
assert len(events) == 0
async_fire_time_changed(hass, cur_time + timedelta(seconds=5))
await hass.async_block_till_done()
assert not script_obj.is_running
assert len(events) == 1
assert "(timeout: 0:00:05)" in caplog.text
if action_type == "template":
variable_wait = {"wait": {"completed": False, "remaining": 0.0}}
else:
variable_wait = {"wait": {"trigger": None, "remaining": 0.0}}
expected_trace = {
"0": [{"result": variable_wait}],
"1": [
{
"result": {"event": "test_event", "event_data": {}},
"variables": variable_wait,
}
],
}
assert_action_trace(expected_trace)
@pytest.mark.parametrize(
"continue_on_timeout,n_events", [(False, 0), (True, 1), (None, 1)]
)
@pytest.mark.parametrize("action_type", ["template", "trigger"])
async def test_wait_continue_on_timeout(
hass, continue_on_timeout, n_events, action_type
):
"""Test the wait continue_on_timeout option."""
event = "test_event"
events = async_capture_events(hass, event)
if action_type == "template":
action = {"wait_template": "{{ states.switch.test.state == 'off' }}"}
else:
action = {
"wait_for_trigger": {
"platform": "state",
"entity_id": "switch.test",
"to": "off",
}
}
action["timeout"] = 5
if continue_on_timeout is not None:
action["continue_on_timeout"] = continue_on_timeout
sequence = cv.SCRIPT_SCHEMA([action, {"event": event}])
sequence = await script.async_validate_actions_config(hass, sequence)
script_obj = script.Script(hass, sequence, "Test Name", "test_domain")
wait_started_flag = async_watch_for_action(script_obj, "wait")
try:
hass.states.async_set("switch.test", "on")
hass.async_create_task(script_obj.async_run(context=Context()))
await asyncio.wait_for(wait_started_flag.wait(), 1)
assert script_obj.is_running
assert len(events) == 0
except (AssertionError, asyncio.TimeoutError):
await script_obj.async_stop()
raise
else:
async_fire_time_changed(hass, dt_util.utcnow() + timedelta(seconds=5))
await hass.async_block_till_done()
assert not script_obj.is_running
assert len(events) == n_events
if action_type == "template":
variable_wait = {"wait": {"completed": False, "remaining": 0.0}}
else:
variable_wait = {"wait": {"trigger": None, "remaining": 0.0}}
expected_trace = {
"0": [{"result": variable_wait}],
}
if continue_on_timeout is False:
expected_trace["0"][0]["result"]["timeout"] = True
expected_trace["0"][0]["error_type"] = asyncio.TimeoutError
expected_script_execution = "aborted"
else:
expected_trace["1"] = [
{
"result": {"event": "test_event", "event_data": {}},
"variables": variable_wait,
}
]
expected_script_execution = "finished"
assert_action_trace(expected_trace, expected_script_execution)
async def test_wait_template_variables_in(hass):
"""Test the wait template with input variables."""
sequence = cv.SCRIPT_SCHEMA({"wait_template": "{{ is_state(data, 'off') }}"})
script_obj = script.Script(hass, sequence, "Test Name", "test_domain")
wait_started_flag = async_watch_for_action(script_obj, "wait")
try:
hass.states.async_set("switch.test", "on")
hass.async_create_task(
script_obj.async_run(MappingProxyType({"data": "switch.test"}), Context())
)
await asyncio.wait_for(wait_started_flag.wait(), 1)
assert script_obj.is_running
except (AssertionError, asyncio.TimeoutError):
await script_obj.async_stop()
raise
else:
hass.states.async_set("switch.test", "off")
await hass.async_block_till_done()
assert not script_obj.is_running
assert_action_trace(
{
"0": [{"result": {"wait": {"completed": True, "remaining": None}}}],
}
)
async def test_wait_template_with_utcnow(hass):
"""Test the wait template with utcnow."""
sequence = cv.SCRIPT_SCHEMA({"wait_template": "{{ utcnow().hour == 12 }}"})
script_obj = script.Script(hass, sequence, "Test Name", "test_domain")
wait_started_flag = async_watch_for_action(script_obj, "wait")
start_time = dt_util.utcnow().replace(minute=1) + timedelta(hours=48)
try:
non_matching_time = start_time.replace(hour=3)
with patch("homeassistant.util.dt.utcnow", return_value=non_matching_time):
hass.async_create_task(script_obj.async_run(context=Context()))
await asyncio.wait_for(wait_started_flag.wait(), 1)
assert script_obj.is_running
match_time = start_time.replace(hour=12)
with patch("homeassistant.util.dt.utcnow", return_value=match_time):
async_fire_time_changed(hass, match_time)
except (AssertionError, asyncio.TimeoutError):
await script_obj.async_stop()
raise
else:
await hass.async_block_till_done()
assert not script_obj.is_running
assert_action_trace(
{
"0": [{"result": {"wait": {"completed": True, "remaining": None}}}],
}
)
async def test_wait_template_with_utcnow_no_match(hass):
"""Test the wait template with utcnow that does not match."""
sequence = cv.SCRIPT_SCHEMA({"wait_template": "{{ utcnow().hour == 12 }}"})
script_obj = script.Script(hass, sequence, "Test Name", "test_domain")
wait_started_flag = async_watch_for_action(script_obj, "wait")
start_time = dt_util.utcnow().replace(minute=1) + timedelta(hours=48)
timed_out = False
try:
non_matching_time = start_time.replace(hour=3)
with patch("homeassistant.util.dt.utcnow", return_value=non_matching_time):
hass.async_create_task(script_obj.async_run(context=Context()))
await asyncio.wait_for(wait_started_flag.wait(), 1)
assert script_obj.is_running
second_non_matching_time = start_time.replace(hour=4)
with patch(
"homeassistant.util.dt.utcnow", return_value=second_non_matching_time
):
async_fire_time_changed(hass, second_non_matching_time)
async with timeout(0.1):
await hass.async_block_till_done()
except asyncio.TimeoutError:
timed_out = True
await script_obj.async_stop()
assert timed_out
assert_action_trace(
{
"0": [{"result": {"wait": {"completed": False, "remaining": None}}}],
}
)
@pytest.mark.parametrize("mode", ["no_timeout", "timeout_finish", "timeout_not_finish"])
@pytest.mark.parametrize("action_type", ["template", "trigger"])
async def test_wait_variables_out(hass, mode, action_type):
"""Test the wait output variable."""
event = "test_event"
events = async_capture_events(hass, event)
if action_type == "template":
action = {"wait_template": "{{ states.switch.test.state == 'off' }}"}
event_key = "completed"
else:
action = {
"wait_for_trigger": {
"platform": "state",
"entity_id": "switch.test",
"to": "off",
}
}
event_key = "trigger"
if mode != "no_timeout":
action["timeout"] = 5
action["continue_on_timeout"] = True
sequence = [
action,
{
"event": event,
"event_data_template": {
event_key: f"{{{{ wait.{event_key} }}}}",
"remaining": "{{ wait.remaining }}",
},
},
]
sequence = cv.SCRIPT_SCHEMA(sequence)
sequence = await script.async_validate_actions_config(hass, sequence)
script_obj = script.Script(hass, sequence, "Test Name", "test_domain")
wait_started_flag = async_watch_for_action(script_obj, "wait")
try:
hass.states.async_set("switch.test", "on")
hass.async_create_task(script_obj.async_run(context=Context()))
await asyncio.wait_for(wait_started_flag.wait(), 1)
assert script_obj.is_running
assert len(events) == 0
except (AssertionError, asyncio.TimeoutError):
await script_obj.async_stop()
raise
else:
if mode == "timeout_not_finish":
async_fire_time_changed(hass, dt_util.utcnow() + timedelta(seconds=5))
else:
hass.states.async_set("switch.test", "off")
await hass.async_block_till_done()
assert not script_obj.is_running
assert len(events) == 1
if action_type == "template":
assert events[0].data["completed"] == (mode != "timeout_not_finish")
elif mode != "timeout_not_finish":
assert "'to_state': <state switch.test=off" in events[0].data["trigger"]
else:
assert events[0].data["trigger"] is None
remaining = events[0].data["remaining"]
if mode == "no_timeout":
assert remaining is None
elif mode == "timeout_finish":
assert 0.0 < float(remaining) < 5
else:
assert float(remaining) == 0.0
async def test_wait_for_trigger_bad(hass, caplog):
"""Test bad wait_for_trigger."""
sequence = cv.SCRIPT_SCHEMA(
{"wait_for_trigger": {"platform": "state", "entity_id": "sensor.abc"}}
)
sequence = await script.async_validate_actions_config(hass, sequence)
script_obj = script.Script(
hass,
sequence,
"Test Name",
"test_domain",
)
async def async_attach_trigger_mock(*args, **kwargs):
return None
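    # Returning None from async_attach_trigger signals that trigger setup
    # failed, which the script reports as an unknown error.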
with mock.patch(
"homeassistant.components.homeassistant.triggers.state.async_attach_trigger",
wraps=async_attach_trigger_mock,
):
hass.async_create_task(script_obj.async_run())
await hass.async_block_till_done()
assert "Unknown error while setting up trigger" in caplog.text
assert_action_trace(
{
"0": [{"result": {"wait": {"trigger": None, "remaining": None}}}],
}
)
async def test_wait_for_trigger_generated_exception(hass, caplog):
"""Test bad wait_for_trigger."""
sequence = cv.SCRIPT_SCHEMA(
{"wait_for_trigger": {"platform": "state", "entity_id": "sensor.abc"}}
)
sequence = await script.async_validate_actions_config(hass, sequence)
script_obj = script.Script(
hass,
sequence,
"Test Name",
"test_domain",
)
async def async_attach_trigger_mock(*args, **kwargs):
raise ValueError("something bad")
with mock.patch(
"homeassistant.components.homeassistant.triggers.state.async_attach_trigger",
wraps=async_attach_trigger_mock,
):
hass.async_create_task(script_obj.async_run())
await hass.async_block_till_done()
assert "Error setting up trigger" in caplog.text
assert "ValueError" in caplog.text
assert "something bad" in caplog.text
assert_action_trace(
{
"0": [{"result": {"wait": {"trigger": None, "remaining": None}}}],
}
)
async def test_condition_warning(hass, caplog):
"""Test warning on condition."""
event = "test_event"
events = async_capture_events(hass, event)
sequence = cv.SCRIPT_SCHEMA(
[
{"event": event},
{
"condition": "numeric_state",
"entity_id": "test.entity",
"above": 0,
},
{"event": event},
]
)
script_obj = script.Script(hass, sequence, "Test Name", "test_domain")
caplog.clear()
caplog.set_level(logging.WARNING)
hass.states.async_set("test.entity", "string")
await script_obj.async_run(context=Context())
await hass.async_block_till_done()
assert len(caplog.record_tuples) == 1
assert caplog.record_tuples[0][1] == logging.WARNING
assert len(events) == 1
assert_action_trace(
{
"0": [{"result": {"event": "test_event", "event_data": {}}}],
"1": [{"error_type": script._StopScript, "result": {"result": False}}],
"1/entity_id/0": [{"error_type": ConditionError}],
},
expected_script_execution="aborted",
)
async def test_condition_basic(hass, caplog):
"""Test if we can use conditions in a script."""
event = "test_event"
events = async_capture_events(hass, event)
alias = "condition step"
sequence = cv.SCRIPT_SCHEMA(
[
{"event": event},
{
"alias": alias,
"condition": "template",
"value_template": "{{ states.test.entity.state == 'hello' }}",
},
{"event": event},
]
)
script_obj = script.Script(hass, sequence, "Test Name", "test_domain")
hass.states.async_set("test.entity", "hello")
await script_obj.async_run(context=Context())
await hass.async_block_till_done()
assert f"Test condition {alias}: True" in caplog.text
caplog.clear()
assert len(events) == 2
assert_action_trace(
{
"0": [{"result": {"event": "test_event", "event_data": {}}}],
"1": [{"result": {"entities": ["test.entity"], "result": True}}],
"2": [{"result": {"event": "test_event", "event_data": {}}}],
}
)
hass.states.async_set("test.entity", "goodbye")
await script_obj.async_run(context=Context())
await hass.async_block_till_done()
assert f"Test condition {alias}: False" in caplog.text
assert len(events) == 3
assert_action_trace(
{
"0": [{"result": {"event": "test_event", "event_data": {}}}],
"1": [
{
"error_type": script._StopScript,
"result": {"entities": ["test.entity"], "result": False},
}
],
},
expected_script_execution="aborted",
)
async def test_shorthand_template_condition(hass, caplog):
"""Test if we can use shorthand template conditions in a script."""
event = "test_event"
events = async_capture_events(hass, event)
alias = "condition step"
sequence = cv.SCRIPT_SCHEMA(
[
{"event": event},
{
"alias": alias,
"condition": "{{ states.test.entity.state == 'hello' }}",
},
{"event": event},
]
)
script_obj = script.Script(hass, sequence, "Test Name", "test_domain")
hass.states.async_set("test.entity", "hello")
await script_obj.async_run(context=Context())
await hass.async_block_till_done()
assert f"Test condition {alias}: True" in caplog.text
caplog.clear()
assert len(events) == 2
assert_action_trace(
{
"0": [{"result": {"event": "test_event", "event_data": {}}}],
"1": [{"result": {"entities": ["test.entity"], "result": True}}],
"2": [{"result": {"event": "test_event", "event_data": {}}}],
}
)
hass.states.async_set("test.entity", "goodbye")
await script_obj.async_run(context=Context())
await hass.async_block_till_done()
assert f"Test condition {alias}: False" in caplog.text
assert len(events) == 3
assert_action_trace(
{
"0": [{"result": {"event": "test_event", "event_data": {}}}],
"1": [
{
"error_type": script._StopScript,
"result": {"entities": ["test.entity"], "result": False},
}
],
},
expected_script_execution="aborted",
)
async def test_condition_validation(hass, caplog):
"""Test if we can use conditions which validate late in a script."""
registry = er.async_get(hass)
entry = registry.async_get_or_create(
"test", "hue", "1234", suggested_object_id="entity"
)
assert entry.entity_id == "test.entity"
event = "test_event"
events = async_capture_events(hass, event)
alias = "condition step"
sequence = cv.SCRIPT_SCHEMA(
[
{"event": event},
{
"alias": alias,
"condition": "state",
"entity_id": entry.id,
"state": "hello",
},
{"event": event},
]
)
sequence = await script.async_validate_actions_config(hass, sequence)
script_obj = script.Script(hass, sequence, "Test Name", "test_domain")
hass.states.async_set("test.entity", "hello")
await script_obj.async_run(context=Context())
await hass.async_block_till_done()
assert f"Test condition {alias}: True" in caplog.text
caplog.clear()
assert len(events) == 2
assert_action_trace(
{
"0": [{"result": {"event": "test_event", "event_data": {}}}],
"1": [{"result": {"result": True}}],
"1/entity_id/0": [
{"result": {"result": True, "state": "hello", "wanted_state": "hello"}}
],
"2": [{"result": {"event": "test_event", "event_data": {}}}],
}
)
hass.states.async_set("test.entity", "goodbye")
await script_obj.async_run(context=Context())
await hass.async_block_till_done()
assert f"Test condition {alias}: False" in caplog.text
assert len(events) == 3
assert_action_trace(
{
"0": [{"result": {"event": "test_event", "event_data": {}}}],
"1": [
{
"error_type": script._StopScript,
"result": {"result": False},
}
],
"1/entity_id/0": [
{
"result": {
"result": False,
"state": "goodbye",
"wanted_state": "hello",
}
}
],
},
expected_script_execution="aborted",
)
@patch("homeassistant.helpers.script.condition.async_from_config")
async def test_condition_created_once(async_from_config, hass):
"""Test that the conditions do not get created multiple times."""
sequence = cv.SCRIPT_SCHEMA(
{
"condition": "template",
"value_template": '{{ states.test.entity.state == "hello" }}',
}
)
script_obj = script.Script(
hass, sequence, "Test Name", "test_domain", script_mode="parallel", max_runs=2
)
async_from_config.reset_mock()
hass.states.async_set("test.entity", "hello")
await script_obj.async_run(context=Context())
await script_obj.async_run(context=Context())
await hass.async_block_till_done()
async_from_config.assert_called_once()
assert len(script_obj._config_cache) == 1
async def test_condition_all_cached(hass):
"""Test that multiple conditions get cached."""
sequence = cv.SCRIPT_SCHEMA(
[
{
"condition": "template",
"value_template": '{{ states.test.entity.state == "hello" }}',
},
{
"condition": "template",
"value_template": '{{ states.test.entity.state != "hello" }}',
},
]
)
script_obj = script.Script(hass, sequence, "Test Name", "test_domain")
hass.states.async_set("test.entity", "hello")
await script_obj.async_run(context=Context())
await hass.async_block_till_done()
assert len(script_obj._config_cache) == 2
@pytest.mark.parametrize("count", [3, script.ACTION_TRACE_NODE_MAX_LEN * 2])
async def test_repeat_count(hass, caplog, count):
"""Test repeat action w/ count option."""
event = "test_event"
events = async_capture_events(hass, event)
alias = "condition step"
sequence = cv.SCRIPT_SCHEMA(
{
"alias": alias,
"repeat": {
"count": count,
"sequence": {
"event": event,
"event_data_template": {
"first": "{{ repeat.first }}",
"index": "{{ repeat.index }}",
"last": "{{ repeat.last }}",
},
},
},
}
)
script_obj = script.Script(hass, sequence, "Test Name", "test_domain")
await script_obj.async_run(context=Context())
await hass.async_block_till_done()
assert len(events) == count
for index, event in enumerate(events):
assert event.data.get("first") == (index == 0)
assert event.data.get("index") == index + 1
assert event.data.get("last") == (index == count - 1)
assert caplog.text.count(f"Repeating {alias}") == count
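    # Traces store at most ACTION_TRACE_NODE_MAX_LEN results per node, so for
    # large counts only the trailing iterations show up in the trace.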
first_index = max(1, count - script.ACTION_TRACE_NODE_MAX_LEN + 1)
last_index = count + 1
assert_action_trace(
{
"0": [{}],
"0/repeat/sequence/0": [
{
"result": {
"event": "test_event",
"event_data": {
"first": index == 1,
"index": index,
"last": index == count,
},
},
"variables": {
"repeat": {
"first": index == 1,
"index": index,
"last": index == count,
}
},
}
for index in range(first_index, last_index)
],
}
)
async def test_repeat_count_0(hass, caplog):
"""Test repeat action w/ count option."""
event = "test_event"
events = async_capture_events(hass, event)
count = 0
alias = "condition step"
sequence = cv.SCRIPT_SCHEMA(
{
"alias": alias,
"repeat": {
"count": count,
"sequence": {
"event": event,
"event_data_template": {
"first": "{{ repeat.first }}",
"index": "{{ repeat.index }}",
"last": "{{ repeat.last }}",
},
},
},
}
)
script_obj = script.Script(hass, sequence, "Test Name", "test_domain")
await script_obj.async_run(context=Context())
await hass.async_block_till_done()
assert len(events) == count
assert caplog.text.count(f"Repeating {alias}") == count
assert_action_trace(
{
"0": [{}],
}
)
@pytest.mark.parametrize("condition", ["while", "until"])
async def test_repeat_condition_warning(hass, caplog, condition):
"""Test warning on repeat conditions."""
event = "test_event"
events = async_capture_events(hass, event)
count = 0 if condition == "while" else 1
sequence = {
"repeat": {
"sequence": [
{
"event": event,
},
],
}
}
sequence["repeat"][condition] = {
"condition": "numeric_state",
"entity_id": "sensor.test",
"value_template": "{{ unassigned_variable }}",
"above": "0",
}
script_obj = script.Script(
hass, cv.SCRIPT_SCHEMA(sequence), f"Test {condition}", "test_domain"
)
hass.states.async_set("sensor.test", "1")
caplog.clear()
caplog.set_level(logging.WARNING)
hass.async_create_task(script_obj.async_run(context=Context()))
await asyncio.wait_for(hass.async_block_till_done(), 1)
assert f"Error in '{condition}[0]' evaluation" in caplog.text
assert len(events) == count
expected_trace = {"0": [{}]}
if condition == "until":
expected_trace["0/repeat/sequence/0"] = [
{
"result": {"event": "test_event", "event_data": {}},
"variables": {"repeat": {"first": True, "index": 1}},
}
]
expected_trace["0/repeat"] = [
{
"result": {"result": None},
"variables": {"repeat": {"first": True, "index": 1}},
}
]
expected_trace[f"0/repeat/{condition}/0"] = [{"error_type": ConditionError}]
expected_trace[f"0/repeat/{condition}/0/entity_id/0"] = [
{"error_type": ConditionError}
]
assert_action_trace(expected_trace)
@pytest.mark.parametrize("condition", ["while", "until"])
@pytest.mark.parametrize("direct_template", [False, True])
async def test_repeat_conditional(hass, condition, direct_template):
"""Test repeat action w/ while option."""
event = "test_event"
events = async_capture_events(hass, event)
count = 3
sequence = {
"repeat": {
"sequence": [
{
"event": event,
"event_data_template": {
"first": "{{ repeat.first }}",
"index": "{{ repeat.index }}",
},
},
{"wait_template": "{{ is_state('sensor.test', 'next') }}"},
{"wait_template": "{{ not is_state('sensor.test', 'next') }}"},
],
}
}
if condition == "while":
template = "{{ not is_state('sensor.test', 'done') }}"
if direct_template:
sequence["repeat"]["while"] = template
else:
sequence["repeat"]["while"] = {
"condition": "template",
"value_template": template,
}
else:
template = "{{ is_state('sensor.test', 'done') }}"
if direct_template:
sequence["repeat"]["until"] = template
else:
sequence["repeat"]["until"] = {
"condition": "template",
"value_template": template,
}
script_obj = script.Script(
hass, cv.SCRIPT_SCHEMA(sequence), "Test Name", "test_domain"
)
wait_started = async_watch_for_action(script_obj, "wait")
hass.states.async_set("sensor.test", "1")
hass.async_create_task(script_obj.async_run(context=Context()))
try:
for index in range(2, count + 1):
await asyncio.wait_for(wait_started.wait(), 1)
wait_started.clear()
hass.states.async_set("sensor.test", "next")
await asyncio.wait_for(wait_started.wait(), 1)
wait_started.clear()
hass.states.async_set("sensor.test", index)
await asyncio.wait_for(wait_started.wait(), 1)
wait_started.clear()
hass.states.async_set("sensor.test", "next")
await asyncio.wait_for(wait_started.wait(), 1)
wait_started.clear()
hass.states.async_set("sensor.test", "done")
await asyncio.wait_for(hass.async_block_till_done(), 1)
except asyncio.TimeoutError:
await script_obj.async_stop()
raise
assert len(events) == count
for index, event in enumerate(events):
assert event.data.get("first") == (index == 0)
assert event.data.get("index") == index + 1
async def test_repeat_until_condition_validation(hass, caplog):
"""Test if we can use conditions in repeat until conditions which validate late."""
registry = er.async_get(hass)
entry = registry.async_get_or_create(
"test", "hue", "1234", suggested_object_id="entity"
)
assert entry.entity_id == "test.entity"
event = "test_event"
events = async_capture_events(hass, event)
sequence = cv.SCRIPT_SCHEMA(
[
{
"repeat": {
"sequence": [
{"event": event},
],
"until": [
{
"condition": "state",
"entity_id": entry.id,
"state": "hello",
}
],
}
},
]
)
hass.states.async_set("test.entity", "hello")
sequence = await script.async_validate_actions_config(hass, sequence)
script_obj = script.Script(hass, sequence, "Test Name", "test_domain")
await script_obj.async_run(context=Context())
await hass.async_block_till_done()
caplog.clear()
assert len(events) == 1
assert_action_trace(
{
"0": [{"result": {}}],
"0/repeat/sequence/0": [
{
"result": {"event": "test_event", "event_data": {}},
"variables": {"repeat": {"first": True, "index": 1}},
}
],
"0/repeat": [
{
"result": {"result": True},
"variables": {"repeat": {"first": True, "index": 1}},
}
],
"0/repeat/until/0": [{"result": {"result": True}}],
"0/repeat/until/0/entity_id/0": [
{"result": {"result": True, "state": "hello", "wanted_state": "hello"}}
],
}
)
async def test_repeat_while_condition_validation(hass, caplog):
"""Test if we can use conditions in repeat while conditions which validate late."""
registry = er.async_get(hass)
entry = registry.async_get_or_create(
"test", "hue", "1234", suggested_object_id="entity"
)
assert entry.entity_id == "test.entity"
event = "test_event"
events = async_capture_events(hass, event)
sequence = cv.SCRIPT_SCHEMA(
[
{
"repeat": {
"sequence": [
{"event": event},
],
"while": [
{
"condition": "state",
"entity_id": entry.id,
"state": "hello",
}
],
}
},
]
)
hass.states.async_set("test.entity", "goodbye")
sequence = await script.async_validate_actions_config(hass, sequence)
script_obj = script.Script(hass, sequence, "Test Name", "test_domain")
await script_obj.async_run(context=Context())
await hass.async_block_till_done()
caplog.clear()
assert len(events) == 0
assert_action_trace(
{
"0": [{"result": {}}],
"0/repeat": [
{
"result": {"result": False},
"variables": {"repeat": {"first": True, "index": 1}},
}
],
"0/repeat/while/0": [{"result": {"result": False}}],
"0/repeat/while/0/entity_id/0": [
{
"result": {
"result": False,
"state": "goodbye",
"wanted_state": "hello",
}
}
],
}
)
@pytest.mark.parametrize("condition", ["while", "until"])
async def test_repeat_var_in_condition(hass, condition):
"""Test repeat action w/ while option."""
event = "test_event"
events = async_capture_events(hass, event)
sequence = {"repeat": {"sequence": {"event": event}}}
if condition == "while":
value_template = "{{ repeat.index <= 2 }}"
else:
value_template = "{{ repeat.index == 2 }}"
sequence["repeat"][condition] = {
"condition": "template",
"value_template": value_template,
}
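    # repeat.index is available inside the while/until templates, so both
    # modes run the sequence exactly twice.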
script_obj = script.Script(
hass, cv.SCRIPT_SCHEMA(sequence), "Test Name", "test_domain"
)
with mock.patch(
"homeassistant.helpers.condition._LOGGER.error",
side_effect=AssertionError("Template Error"),
):
await script_obj.async_run(context=Context())
assert len(events) == 2
if condition == "while":
expected_trace = {
"0": [{}],
"0/repeat": [
{
"result": {"result": True},
"variables": {"repeat": {"first": True, "index": 1}},
},
{
"result": {"result": True},
"variables": {"repeat": {"first": False, "index": 2}},
},
{
"result": {"result": False},
"variables": {"repeat": {"first": False, "index": 3}},
},
],
"0/repeat/while/0": [
{"result": {"entities": [], "result": True}},
{"result": {"entities": [], "result": True}},
{"result": {"entities": [], "result": False}},
],
"0/repeat/sequence/0": [
{"result": {"event": "test_event", "event_data": {}}}
]
* 2,
}
else:
expected_trace = {
"0": [{}],
"0/repeat/sequence/0": [
{
"result": {"event": "test_event", "event_data": {}},
"variables": {"repeat": {"first": True, "index": 1}},
},
{
"result": {"event": "test_event", "event_data": {}},
"variables": {"repeat": {"first": False, "index": 2}},
},
],
"0/repeat": [
{
"result": {"result": False},
"variables": {"repeat": {"first": True, "index": 1}},
},
{
"result": {"result": True},
"variables": {"repeat": {"first": False, "index": 2}},
},
],
"0/repeat/until/0": [
{"result": {"entities": [], "result": False}},
{"result": {"entities": [], "result": True}},
],
}
assert_action_trace(expected_trace)
@pytest.mark.parametrize(
"variables,first_last,inside_x",
[
(None, {"repeat": None, "x": None}, None),
(MappingProxyType({"x": 1}), {"repeat": None, "x": 1}, 1),
],
)
async def test_repeat_nested(hass, variables, first_last, inside_x):
"""Test nested repeats."""
event = "test_event"
events = async_capture_events(hass, event)
sequence = cv.SCRIPT_SCHEMA(
[
{
"event": event,
"event_data_template": {
"repeat": "{{ None if repeat is not defined else repeat }}",
"x": "{{ None if x is not defined else x }}",
},
},
{
"repeat": {
"count": 2,
"sequence": [
{
"event": event,
"event_data_template": {
"first": "{{ repeat.first }}",
"index": "{{ repeat.index }}",
"last": "{{ repeat.last }}",
"x": "{{ None if x is not defined else x }}",
},
},
{
"repeat": {
"count": 2,
"sequence": {
"event": event,
"event_data_template": {
"first": "{{ repeat.first }}",
"index": "{{ repeat.index }}",
"last": "{{ repeat.last }}",
"x": "{{ None if x is not defined else x }}",
},
},
}
},
{
"event": event,
"event_data_template": {
"first": "{{ repeat.first }}",
"index": "{{ repeat.index }}",
"last": "{{ repeat.last }}",
"x": "{{ None if x is not defined else x }}",
},
},
],
}
},
{
"event": event,
"event_data_template": {
"repeat": "{{ None if repeat is not defined else repeat }}",
"x": "{{ None if x is not defined else x }}",
},
},
]
)
script_obj = script.Script(hass, sequence, "Test Name", "test_domain")
with mock.patch(
"homeassistant.helpers.condition._LOGGER.error",
side_effect=AssertionError("Template Error"),
):
await script_obj.async_run(variables, Context())
assert len(events) == 10
assert events[0].data == first_last
assert events[-1].data == first_last
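    # The middle eight events follow the nesting: for each of the two outer
    # iterations, a lead event, two inner-repeat events, then a trail event.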
for index, result in enumerate(
(
(True, 1, False, inside_x),
(True, 1, False, inside_x),
(False, 2, True, inside_x),
(True, 1, False, inside_x),
(False, 2, True, inside_x),
(True, 1, False, inside_x),
(False, 2, True, inside_x),
(False, 2, True, inside_x),
),
1,
):
assert events[index].data == {
"first": result[0],
"index": result[1],
"last": result[2],
"x": result[3],
}
event_data1 = {"repeat": None, "x": inside_x}
event_data2 = [
{"first": True, "index": 1, "last": False, "x": inside_x},
{"first": False, "index": 2, "last": True, "x": inside_x},
]
variable_repeat = [
{"repeat": {"first": True, "index": 1, "last": False}},
{"repeat": {"first": False, "index": 2, "last": True}},
]
expected_trace = {
"0": [{"result": {"event": "test_event", "event_data": event_data1}}],
"1": [{}],
"1/repeat/sequence/0": [
{
"result": {"event": "test_event", "event_data": event_data2[0]},
"variables": variable_repeat[0],
},
{
"result": {"event": "test_event", "event_data": event_data2[1]},
"variables": variable_repeat[1],
},
],
"1/repeat/sequence/1": [{}, {}],
"1/repeat/sequence/1/repeat/sequence/0": [
{"result": {"event": "test_event", "event_data": event_data2[0]}},
{
"result": {"event": "test_event", "event_data": event_data2[1]},
"variables": variable_repeat[1],
},
{
"result": {"event": "test_event", "event_data": event_data2[0]},
"variables": variable_repeat[0],
},
{"result": {"event": "test_event", "event_data": event_data2[1]}},
],
"1/repeat/sequence/2": [
{"result": {"event": "test_event", "event_data": event_data2[0]}},
{"result": {"event": "test_event", "event_data": event_data2[1]}},
],
"2": [{"result": {"event": "test_event", "event_data": event_data1}}],
}
assert_action_trace(expected_trace)
async def test_choose_warning(hass, caplog):
"""Test warning on choose."""
event = "test_event"
events = async_capture_events(hass, event)
sequence = cv.SCRIPT_SCHEMA(
{
"choose": [
{
"conditions": {
"condition": "numeric_state",
"entity_id": "test.entity",
"value_template": "{{ undefined_a + undefined_b }}",
"above": 1,
},
"sequence": {"event": event, "event_data": {"choice": "first"}},
},
{
"conditions": {
"condition": "numeric_state",
"entity_id": "test.entity",
"value_template": "{{ 'string' }}",
"above": 2,
},
"sequence": {"event": event, "event_data": {"choice": "second"}},
},
],
"default": {"event": event, "event_data": {"choice": "default"}},
}
)
script_obj = script.Script(hass, sequence, "Test Name", "test_domain")
hass.states.async_set("test.entity", "9")
await hass.async_block_till_done()
caplog.clear()
caplog.set_level(logging.WARNING)
await script_obj.async_run(context=Context())
await hass.async_block_till_done()
assert len(caplog.record_tuples) == 2
assert caplog.record_tuples[0][1] == logging.WARNING
assert caplog.record_tuples[1][1] == logging.WARNING
assert len(events) == 1
assert events[0].data["choice"] == "default"
@pytest.mark.parametrize("var,result", [(1, "first"), (2, "second"), (3, "default")])
async def test_choose(hass, caplog, var, result):
"""Test choose action."""
event = "test_event"
events = async_capture_events(hass, event)
alias = "choose step"
choice = {1: "choice one", 2: "choice two", 3: None}
aliases = {1: "sequence one", 2: "sequence two", 3: "default sequence"}
sequence = cv.SCRIPT_SCHEMA(
{
"alias": alias,
"choose": [
{
"alias": choice[1],
"conditions": {
"condition": "template",
"value_template": "{{ var == 1 }}",
},
"sequence": {
"alias": aliases[1],
"event": event,
"event_data": {"choice": "first"},
},
},
{
"alias": choice[2],
"conditions": "{{ var == 2 }}",
"sequence": {
"alias": aliases[2],
"event": event,
"event_data": {"choice": "second"},
},
},
],
"default": {
"alias": aliases[3],
"event": event,
"event_data": {"choice": "default"},
},
}
)
script_obj = script.Script(hass, sequence, "Test Name", "test_domain")
await script_obj.async_run(MappingProxyType({"var": var}), Context())
await hass.async_block_till_done()
assert len(events) == 1
assert events[0].data["choice"] == result
expected_choice = choice[var]
if var == 3:
expected_choice = "default"
assert f"{alias}: {expected_choice}: Executing step {aliases[var]}" in caplog.text
expected_choice = var - 1
if var == 3:
expected_choice = "default"
expected_trace = {"0": [{"result": {"choice": expected_choice}}]}
if var >= 1:
expected_trace["0/choose/0"] = [{"result": {"result": var == 1}}]
expected_trace["0/choose/0/conditions/0"] = [
{"result": {"entities": [], "result": var == 1}}
]
if var >= 2:
expected_trace["0/choose/1"] = [{"result": {"result": var == 2}}]
expected_trace["0/choose/1/conditions/0"] = [
{"result": {"entities": [], "result": var == 2}}
]
if var == 1:
expected_trace["0/choose/0/sequence/0"] = [
{"result": {"event": "test_event", "event_data": {"choice": "first"}}}
]
if var == 2:
expected_trace["0/choose/1/sequence/0"] = [
{"result": {"event": "test_event", "event_data": {"choice": "second"}}}
]
if var == 3:
expected_trace["0/default/0"] = [
{"result": {"event": "test_event", "event_data": {"choice": "default"}}}
]
assert_action_trace(expected_trace)
async def test_choose_condition_validation(hass, caplog):
"""Test if we can use conditions in choose actions which validate late."""
registry = er.async_get(hass)
entry = registry.async_get_or_create(
"test", "hue", "1234", suggested_object_id="entity"
)
assert entry.entity_id == "test.entity"
event = "test_event"
events = async_capture_events(hass, event)
sequence = cv.SCRIPT_SCHEMA(
[
{"event": event},
{
"choose": [
{
"alias": "choice one",
"conditions": {
"condition": "state",
"entity_id": entry.id,
"state": "hello",
},
"sequence": {
"alias": "sequence one",
"event": event,
"event_data": {"choice": "first"},
},
},
]
},
]
)
sequence = await script.async_validate_actions_config(hass, sequence)
script_obj = script.Script(hass, sequence, "Test Name", "test_domain")
hass.states.async_set("test.entity", "hello")
await script_obj.async_run(context=Context())
await hass.async_block_till_done()
caplog.clear()
assert len(events) == 2
assert_action_trace(
{
"0": [{"result": {"event": "test_event", "event_data": {}}}],
"1": [{"result": {"choice": 0}}],
"1/choose/0": [{"result": {"result": True}}],
"1/choose/0/conditions/0": [{"result": {"result": True}}],
"1/choose/0/conditions/0/entity_id/0": [
{"result": {"result": True, "state": "hello", "wanted_state": "hello"}}
],
"1/choose/0/sequence/0": [
{"result": {"event": "test_event", "event_data": {"choice": "first"}}}
],
}
)
hass.states.async_set("test.entity", "goodbye")
await script_obj.async_run(context=Context())
await hass.async_block_till_done()
assert len(events) == 3
assert_action_trace(
{
"0": [{"result": {"event": "test_event", "event_data": {}}}],
"1": [{"result": {}}],
"1/choose/0": [{"result": {"result": False}}],
"1/choose/0/conditions/0": [{"result": {"result": False}}],
"1/choose/0/conditions/0/entity_id/0": [
{
"result": {
"result": False,
"state": "goodbye",
"wanted_state": "hello",
}
}
],
},
)
@pytest.mark.parametrize(
"action",
[
{"repeat": {"count": 1, "sequence": {"event": "abc"}}},
{"choose": {"conditions": [], "sequence": {"event": "abc"}}},
{"choose": [], "default": {"event": "abc"}},
],
)
async def test_multiple_runs_repeat_choose(hass, caplog, action):
"""Test parallel runs with repeat & choose actions & max_runs > default."""
max_runs = script.DEFAULT_MAX + 1
script_obj = script.Script(
hass,
cv.SCRIPT_SCHEMA(action),
"Test Name",
"test_domain",
script_mode="parallel",
max_runs=max_runs,
)
events = async_capture_events(hass, "abc")
for _ in range(max_runs):
hass.async_create_task(script_obj.async_run(context=Context()))
await hass.async_block_till_done()
assert "WARNING" not in caplog.text
assert "ERROR" not in caplog.text
assert len(events) == max_runs
async def test_last_triggered(hass):
"""Test the last_triggered."""
event = "test_event"
sequence = cv.SCRIPT_SCHEMA({"event": event})
script_obj = script.Script(hass, sequence, "Test Name", "test_domain")
assert script_obj.last_triggered is None
time = dt_util.utcnow()
with mock.patch("homeassistant.helpers.script.utcnow", return_value=time):
await script_obj.async_run(context=Context())
await hass.async_block_till_done()
assert script_obj.last_triggered == time
async def test_propagate_error_service_not_found(hass):
"""Test that a script aborts when a service is not found."""
event = "test_event"
events = async_capture_events(hass, event)
sequence = cv.SCRIPT_SCHEMA([{"service": "test.script"}, {"event": event}])
script_obj = script.Script(hass, sequence, "Test Name", "test_domain")
with pytest.raises(exceptions.ServiceNotFound):
await script_obj.async_run(context=Context())
assert len(events) == 0
assert not script_obj.is_running
expected_trace = {
"0": [
{
"error_type": ServiceNotFound,
"result": {
"limit": 10,
"params": {
"domain": "test",
"service": "script",
"service_data": {},
"target": {},
},
"running_script": False,
},
}
],
}
assert_action_trace(expected_trace, expected_script_execution="error")
async def test_propagate_error_invalid_service_data(hass):
"""Test that a script aborts when we send invalid service data."""
event = "test_event"
events = async_capture_events(hass, event)
calls = async_mock_service(hass, "test", "script", vol.Schema({"text": str}))
sequence = cv.SCRIPT_SCHEMA(
[{"service": "test.script", "data": {"text": 1}}, {"event": event}]
)
script_obj = script.Script(hass, sequence, "Test Name", "test_domain")
with pytest.raises(vol.Invalid):
await script_obj.async_run(context=Context())
assert len(events) == 0
assert len(calls) == 0
assert not script_obj.is_running
expected_trace = {
"0": [
{
"error_type": vol.MultipleInvalid,
"result": {
"limit": 10,
"params": {
"domain": "test",
"service": "script",
"service_data": {"text": 1},
"target": {},
},
"running_script": False,
},
}
],
}
assert_action_trace(expected_trace, expected_script_execution="error")
async def test_propagate_error_service_exception(hass):
"""Test that a script aborts when a service throws an exception."""
event = "test_event"
events = async_capture_events(hass, event)
@callback
def record_call(service):
"""Add recorded event to set."""
raise ValueError("BROKEN")
hass.services.async_register("test", "script", record_call)
sequence = cv.SCRIPT_SCHEMA([{"service": "test.script"}, {"event": event}])
script_obj = script.Script(hass, sequence, "Test Name", "test_domain")
with pytest.raises(ValueError):
await script_obj.async_run(context=Context())
assert len(events) == 0
assert not script_obj.is_running
expected_trace = {
"0": [
{
"error_type": ValueError,
"result": {
"limit": 10,
"params": {
"domain": "test",
"service": "script",
"service_data": {},
"target": {},
},
"running_script": False,
},
}
],
}
assert_action_trace(expected_trace, expected_script_execution="error")
async def test_referenced_areas(hass):
"""Test referenced areas."""
script_obj = script.Script(
hass,
cv.SCRIPT_SCHEMA(
[
{
"service": "test.script",
"data": {"area_id": "area_service_not_list"},
},
{
"service": "test.script",
"data": {"area_id": ["area_service_list"]},
},
{
"service": "test.script",
"data": {"area_id": "{{ 'area_service_template' }}"},
},
{
"service": "test.script",
"target": {"area_id": "area_in_target"},
},
{
"service": "test.script",
"data_template": {"area_id": "area_in_data_template"},
},
{"service": "test.script", "data": {"without": "area_id"}},
{
"choose": [
{
"conditions": "{{ true == false }}",
"sequence": [
{
"service": "test.script",
"data": {"area_id": "area_choice_1_seq"},
}
],
},
{
"conditions": "{{ true == false }}",
"sequence": [
{
"service": "test.script",
"data": {"area_id": "area_choice_2_seq"},
}
],
},
],
"default": [
{
"service": "test.script",
"data": {"area_id": "area_default_seq"},
}
],
},
{"event": "test_event"},
{"delay": "{{ delay_period }}"},
]
),
"Test Name",
"test_domain",
)
assert script_obj.referenced_areas == {
"area_choice_1_seq",
"area_choice_2_seq",
"area_default_seq",
"area_in_data_template",
"area_in_target",
"area_service_list",
"area_service_not_list",
# 'area_service_template', # no area extraction from template
}
# Test we cache results.
assert script_obj.referenced_areas is script_obj.referenced_areas
async def test_referenced_entities(hass):
"""Test referenced entities."""
script_obj = script.Script(
hass,
cv.SCRIPT_SCHEMA(
[
{
"service": "test.script",
"data": {"entity_id": "light.service_not_list"},
},
{
"service": "test.script",
"data": {"entity_id": ["light.service_list"]},
},
{
"service": "test.script",
"data": {"entity_id": "{{ 'light.service_template' }}"},
},
{
"service": "test.script",
"entity_id": "light.direct_entity_referenced",
},
{
"service": "test.script",
"target": {"entity_id": "light.entity_in_target"},
},
{
"service": "test.script",
"data_template": {"entity_id": "light.entity_in_data_template"},
},
{
"condition": "state",
"entity_id": "sensor.condition",
"state": "100",
},
{"service": "test.script", "data": {"without": "entity_id"}},
{"scene": "scene.hello"},
{
"choose": [
{
"conditions": "{{ states.light.choice_1_cond == 'on' }}",
"sequence": [
{
"service": "test.script",
"data": {"entity_id": "light.choice_1_seq"},
}
],
},
{
"conditions": {
"condition": "state",
"entity_id": "light.choice_2_cond",
"state": "on",
},
"sequence": [
{
"service": "test.script",
"data": {"entity_id": "light.choice_2_seq"},
}
],
},
],
"default": [
{
"service": "test.script",
"data": {"entity_id": "light.default_seq"},
}
],
},
{"event": "test_event"},
{"delay": "{{ delay_period }}"},
]
),
"Test Name",
"test_domain",
)
assert script_obj.referenced_entities == {
# "light.choice_1_cond", # no entity extraction from template conditions
"light.choice_1_seq",
"light.choice_2_cond",
"light.choice_2_seq",
"light.default_seq",
"light.direct_entity_referenced",
"light.entity_in_data_template",
"light.entity_in_target",
"light.service_list",
"light.service_not_list",
# "light.service_template", # no entity extraction from template
"scene.hello",
"sensor.condition",
}
# Test we cache results.
assert script_obj.referenced_entities is script_obj.referenced_entities
async def test_referenced_devices(hass):
"""Test referenced entities."""
script_obj = script.Script(
hass,
cv.SCRIPT_SCHEMA(
[
{"domain": "light", "device_id": "script-dev-id"},
{
"condition": "device",
"device_id": "condition-dev-id",
"domain": "switch",
},
{
"service": "test.script",
"data": {"device_id": "data-string-id"},
},
{
"service": "test.script",
"data_template": {"device_id": "data-template-string-id"},
},
{
"service": "test.script",
"target": {"device_id": "target-string-id"},
},
{
"service": "test.script",
"target": {"device_id": ["target-list-id-1", "target-list-id-2"]},
},
{
"choose": [
{
"conditions": "{{ is_device_attr('choice-2-cond-dev-id', 'model', 'blah') }}",
"sequence": [
{
"service": "test.script",
"target": {
"device_id": "choice-1-seq-device-target"
},
}
],
},
{
"conditions": {
"condition": "device",
"device_id": "choice-2-cond-dev-id",
"domain": "switch",
},
"sequence": [
{
"service": "test.script",
"target": {
"device_id": "choice-2-seq-device-target"
},
}
],
},
],
"default": [
{
"service": "test.script",
"target": {"device_id": "default-device-target"},
}
],
},
]
),
"Test Name",
"test_domain",
)
assert script_obj.referenced_devices == {
# 'choice-1-cond-dev-id', # no device extraction from template conditions
"choice-1-seq-device-target",
"choice-2-cond-dev-id",
"choice-2-seq-device-target",
"condition-dev-id",
"data-string-id",
"data-template-string-id",
"default-device-target",
"script-dev-id",
"target-list-id-1",
"target-list-id-2",
"target-string-id",
}
# Test we cache results.
assert script_obj.referenced_devices is script_obj.referenced_devices
@contextmanager
def does_not_raise():
"""Indicate no exception is expected."""
yield
async def test_script_mode_single(hass, caplog):
"""Test overlapping runs with max_runs = 1."""
event = "test_event"
events = async_capture_events(hass, event)
sequence = cv.SCRIPT_SCHEMA(
[
{"event": event, "event_data": {"value": 1}},
{"wait_template": "{{ states.switch.test.state == 'off' }}"},
{"event": event, "event_data": {"value": 2}},
]
)
script_obj = script.Script(hass, sequence, "Test Name", "test_domain")
wait_started_flag = async_watch_for_action(script_obj, "wait")
try:
hass.states.async_set("switch.test", "on")
hass.async_create_task(script_obj.async_run(context=Context()))
await asyncio.wait_for(wait_started_flag.wait(), 1)
assert script_obj.is_running
assert len(events) == 1
assert events[0].data["value"] == 1
# Start second run of script while first run is suspended in wait_template.
await script_obj.async_run(context=Context())
assert "Already running" in caplog.text
assert script_obj.is_running
except (AssertionError, asyncio.TimeoutError):
await script_obj.async_stop()
raise
else:
hass.states.async_set("switch.test", "off")
await hass.async_block_till_done()
assert not script_obj.is_running
assert len(events) == 2
assert events[1].data["value"] == 2
@pytest.mark.parametrize("max_exceeded", [None, "WARNING", "INFO", "ERROR", "SILENT"])
@pytest.mark.parametrize(
"script_mode,max_runs", [("single", 1), ("parallel", 2), ("queued", 2)]
)
async def test_max_exceeded(hass, caplog, max_exceeded, script_mode, max_runs):
"""Test max_exceeded option."""
sequence = cv.SCRIPT_SCHEMA(
{"wait_template": "{{ states.switch.test.state == 'off' }}"}
)
if max_exceeded is None:
script_obj = script.Script(
hass,
sequence,
"Test Name",
"test_domain",
script_mode=script_mode,
max_runs=max_runs,
)
else:
script_obj = script.Script(
hass,
sequence,
"Test Name",
"test_domain",
script_mode=script_mode,
max_runs=max_runs,
max_exceeded=max_exceeded,
)
hass.states.async_set("switch.test", "on")
for _ in range(max_runs + 1):
hass.async_create_task(script_obj.async_run(context=Context()))
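    # One more run than max_runs was started, so the excess run must be
    # reported at the configured max_exceeded log level (default WARNING).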
hass.states.async_set("switch.test", "off")
await hass.async_block_till_done()
if max_exceeded is None:
max_exceeded = "WARNING"
if max_exceeded == "SILENT":
assert not any(
any(
message in rec.message
for message in ("Already running", "Maximum number of runs exceeded")
)
for rec in caplog.records
)
else:
assert any(
rec.levelname == max_exceeded
and any(
message in rec.message
for message in ("Already running", "Maximum number of runs exceeded")
)
for rec in caplog.records
)
@pytest.mark.parametrize(
"script_mode,messages,last_events",
[("restart", ["Restarting"], [2]), ("parallel", [], [2, 2])],
)
async def test_script_mode_2(hass, caplog, script_mode, messages, last_events):
"""Test overlapping runs with max_runs > 1."""
event = "test_event"
events = async_capture_events(hass, event)
sequence = cv.SCRIPT_SCHEMA(
[
{"event": event, "event_data": {"value": 1}},
{"wait_template": "{{ states.switch.test.state == 'off' }}"},
{"event": event, "event_data": {"value": 2}},
]
)
logger = logging.getLogger("TEST")
max_runs = 1 if script_mode == "restart" else 2
script_obj = script.Script(
hass,
sequence,
"Test Name",
"test_domain",
script_mode=script_mode,
max_runs=max_runs,
logger=logger,
)
wait_started_flag = async_watch_for_action(script_obj, "wait")
try:
hass.states.async_set("switch.test", "on")
hass.async_create_task(script_obj.async_run(context=Context()))
await asyncio.wait_for(wait_started_flag.wait(), 1)
assert script_obj.is_running
assert len(events) == 1
assert events[0].data["value"] == 1
# Start second run of script while first run is suspended in wait_template.
wait_started_flag.clear()
hass.async_create_task(script_obj.async_run(context=Context()))
await asyncio.wait_for(wait_started_flag.wait(), 1)
assert script_obj.is_running
assert len(events) == 2
assert events[1].data["value"] == 1
assert all(
any(
rec.levelname == "INFO"
and rec.name == "TEST"
and message in rec.message
for rec in caplog.records
)
for message in messages
)
except (AssertionError, asyncio.TimeoutError):
await script_obj.async_stop()
raise
else:
hass.states.async_set("switch.test", "off")
await hass.async_block_till_done()
assert not script_obj.is_running
assert len(events) == 2 + len(last_events)
for idx, value in enumerate(last_events, start=2):
assert events[idx].data["value"] == value
async def test_script_mode_queued(hass):
"""Test overlapping runs with script_mode = 'queued' & max_runs > 1."""
event = "test_event"
events = async_capture_events(hass, event)
sequence = cv.SCRIPT_SCHEMA(
[
{"event": event, "event_data": {"value": 1}},
{
"wait_template": "{{ states.switch.test.state == 'off' }}",
"alias": "wait_1",
},
{"event": event, "event_data": {"value": 2}},
{
"wait_template": "{{ states.switch.test.state == 'on' }}",
"alias": "wait_2",
},
]
)
logger = logging.getLogger("TEST")
script_obj = script.Script(
hass,
sequence,
"Test Name",
"test_domain",
script_mode="queued",
max_runs=2,
logger=logger,
)
watch_messages = []
@callback
def check_action():
for message, flag in watch_messages:
if script_obj.last_action and message in script_obj.last_action:
flag.set()
script_obj.change_listener = check_action
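    # The change listener lets us detect, via last_action, which wait step
    # each queued run is currently suspended in.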
wait_started_flag_1 = asyncio.Event()
watch_messages.append(("wait_1", wait_started_flag_1))
wait_started_flag_2 = asyncio.Event()
watch_messages.append(("wait_2", wait_started_flag_2))
try:
assert not script_obj.is_running
assert script_obj.runs == 0
hass.states.async_set("switch.test", "on")
hass.async_create_task(script_obj.async_run(context=Context()))
await asyncio.wait_for(wait_started_flag_1.wait(), 1)
assert script_obj.is_running
assert script_obj.runs == 1
assert len(events) == 1
assert events[0].data["value"] == 1
# Start second run of script while first run is suspended in wait_template.
# This second run should not start until the first run has finished.
hass.async_create_task(script_obj.async_run(context=Context()))
await asyncio.sleep(0)
assert script_obj.is_running
assert script_obj.runs == 2
assert len(events) == 1
hass.states.async_set("switch.test", "off")
await asyncio.wait_for(wait_started_flag_2.wait(), 1)
assert script_obj.is_running
assert script_obj.runs == 2
assert len(events) == 2
assert events[1].data["value"] == 2
wait_started_flag_1.clear()
hass.states.async_set("switch.test", "on")
await asyncio.wait_for(wait_started_flag_1.wait(), 1)
assert script_obj.is_running
assert script_obj.runs == 1
assert len(events) == 3
assert events[2].data["value"] == 1
except (AssertionError, asyncio.TimeoutError):
await script_obj.async_stop()
raise
else:
hass.states.async_set("switch.test", "off")
await asyncio.sleep(0)
hass.states.async_set("switch.test", "on")
await hass.async_block_till_done()
assert not script_obj.is_running
assert script_obj.runs == 0
assert len(events) == 4
assert events[3].data["value"] == 2
async def test_script_mode_queued_cancel(hass):
"""Test canceling with a queued run."""
script_obj = script.Script(
hass,
cv.SCRIPT_SCHEMA({"wait_template": "{{ false }}"}),
"Test Name",
"test_domain",
script_mode="queued",
max_runs=2,
)
wait_started_flag = async_watch_for_action(script_obj, "wait")
try:
assert not script_obj.is_running
assert script_obj.runs == 0
task1 = hass.async_create_task(script_obj.async_run(context=Context()))
await asyncio.wait_for(wait_started_flag.wait(), 1)
task2 = hass.async_create_task(script_obj.async_run(context=Context()))
await asyncio.sleep(0)
assert script_obj.is_running
assert script_obj.runs == 2
with pytest.raises(asyncio.CancelledError):
task2.cancel()
await task2
assert script_obj.is_running
assert script_obj.runs == 1
with pytest.raises(asyncio.CancelledError):
task1.cancel()
await task1
assert not script_obj.is_running
assert script_obj.runs == 0
except (AssertionError, asyncio.TimeoutError):
await script_obj.async_stop()
raise
async def test_script_logging(hass, caplog):
"""Test script logging."""
script_obj = script.Script(hass, [], "Script with % Name", "test_domain")
script_obj._log("Test message with name %s", 1)
assert "Script with % Name: Test message with name 1" in caplog.text
async def test_shutdown_at(hass, caplog):
"""Test stopping scripts at shutdown."""
delay_alias = "delay step"
sequence = cv.SCRIPT_SCHEMA({"delay": {"seconds": 120}, "alias": delay_alias})
script_obj = script.Script(hass, sequence, "test script", "test_domain")
delay_started_flag = async_watch_for_action(script_obj, delay_alias)
try:
hass.async_create_task(script_obj.async_run(context=Context()))
await asyncio.wait_for(delay_started_flag.wait(), 1)
assert script_obj.is_running
assert script_obj.last_action == delay_alias
except (AssertionError, asyncio.TimeoutError):
await script_obj.async_stop()
raise
else:
hass.bus.async_fire("homeassistant_stop")
await hass.async_block_till_done()
assert not script_obj.is_running
assert "Stopping scripts running at shutdown: test script" in caplog.text
expected_trace = {
"0": [{"result": {"delay": 120.0, "done": False}}],
}
assert_action_trace(expected_trace)
async def test_shutdown_after(hass, caplog):
"""Test stopping scripts at shutdown."""
delay_alias = "delay step"
sequence = cv.SCRIPT_SCHEMA({"delay": {"seconds": 120}, "alias": delay_alias})
script_obj = script.Script(hass, sequence, "test script", "test_domain")
delay_started_flag = async_watch_for_action(script_obj, delay_alias)
hass.state = CoreState.stopping
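    # Simulate a shutdown already in progress before the script is started.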
hass.bus.async_fire("homeassistant_stop")
await hass.async_block_till_done()
try:
hass.async_create_task(script_obj.async_run(context=Context()))
await asyncio.wait_for(delay_started_flag.wait(), 1)
assert script_obj.is_running
assert script_obj.last_action == delay_alias
except (AssertionError, asyncio.TimeoutError):
await script_obj.async_stop()
raise
else:
async_fire_time_changed(hass, dt_util.utcnow() + timedelta(seconds=60))
await hass.async_block_till_done()
assert not script_obj.is_running
assert (
"Stopping scripts running too long after shutdown: test script"
in caplog.text
)
expected_trace = {
"0": [{"result": {"delay": 120.0, "done": False}}],
}
assert_action_trace(expected_trace)
async def test_update_logger(hass, caplog):
"""Test updating logger."""
sequence = cv.SCRIPT_SCHEMA({"event": "test_event"})
script_obj = script.Script(hass, sequence, "Test Name", "test_domain")
await script_obj.async_run(context=Context())
await hass.async_block_till_done()
assert script.__name__ in caplog.text
log_name = "testing.123"
script_obj.update_logger(logging.getLogger(log_name))
await script_obj.async_run(context=Context())
await hass.async_block_till_done()
assert log_name in caplog.text
async def test_started_action(hass, caplog):
"""Test the callback of started_action."""
event = "test_event"
log_message = "The script started!"
logger = logging.getLogger("TEST")
sequence = cv.SCRIPT_SCHEMA({"event": event})
script_obj = script.Script(hass, sequence, "Test Name", "test_domain")
@callback
def started_action():
logger.info(log_message)
await script_obj.async_run(context=Context(), started_action=started_action)
await hass.async_block_till_done()
assert log_message in caplog.text
async def test_set_variable(hass, caplog):
"""Test setting variables in scripts."""
alias = "variables step"
sequence = cv.SCRIPT_SCHEMA(
[
{"alias": alias, "variables": {"variable": "value"}},
{"service": "test.script", "data": {"value": "{{ variable }}"}},
]
)
script_obj = script.Script(hass, sequence, "test script", "test_domain")
mock_calls = async_mock_service(hass, "test", "script")
await script_obj.async_run(context=Context())
await hass.async_block_till_done()
assert mock_calls[0].data["value"] == "value"
assert f"Executing step {alias}" in caplog.text
expected_trace = {
"0": [{}],
"1": [
{
"result": {
"limit": SERVICE_CALL_LIMIT,
"params": {
"domain": "test",
"service": "script",
"service_data": {"value": "value"},
"target": {},
},
"running_script": False,
},
"variables": {"variable": "value"},
}
],
}
assert_action_trace(expected_trace)
async def test_set_redefines_variable(hass, caplog):
"""Test setting variables based on their current value."""
sequence = cv.SCRIPT_SCHEMA(
[
{"variables": {"variable": "1"}},
{"service": "test.script", "data": {"value": "{{ variable }}"}},
{"variables": {"variable": "{{ variable | int + 1 }}"}},
{"service": "test.script", "data": {"value": "{{ variable }}"}},
]
)
script_obj = script.Script(hass, sequence, "test script", "test_domain")
mock_calls = async_mock_service(hass, "test", "script")
await script_obj.async_run(context=Context())
await hass.async_block_till_done()
assert mock_calls[0].data["value"] == 1
assert mock_calls[1].data["value"] == 2
expected_trace = {
"0": [{}],
"1": [
{
"result": {
"limit": SERVICE_CALL_LIMIT,
"params": {
"domain": "test",
"service": "script",
"service_data": {"value": 1},
"target": {},
},
"running_script": False,
},
"variables": {"variable": "1"},
}
],
"2": [{}],
"3": [
{
"result": {
"limit": SERVICE_CALL_LIMIT,
"params": {
"domain": "test",
"service": "script",
"service_data": {"value": 2},
"target": {},
},
"running_script": False,
},
"variables": {"variable": 2},
}
],
}
assert_action_trace(expected_trace)
async def test_validate_action_config(hass):
"""Validate action config."""
def templated_device_action(message):
return {
"device_id": "abcd",
"domain": "mobile_app",
"message": f"{message} {{{{ 5 + 5}}}}",
"type": "notify",
}
configs = {
cv.SCRIPT_ACTION_CALL_SERVICE: {"service": "light.turn_on"},
cv.SCRIPT_ACTION_DELAY: {"delay": 5},
cv.SCRIPT_ACTION_WAIT_TEMPLATE: {
"wait_template": "{{ states.light.kitchen.state == 'on' }}"
},
cv.SCRIPT_ACTION_FIRE_EVENT: {"event": "my_event"},
cv.SCRIPT_ACTION_CHECK_CONDITION: {
"condition": "state",
"entity_id": "light.kitchen",
"state": "on",
},
cv.SCRIPT_ACTION_DEVICE_AUTOMATION: templated_device_action("device"),
cv.SCRIPT_ACTION_ACTIVATE_SCENE: {"scene": "scene.relax"},
cv.SCRIPT_ACTION_REPEAT: {
"repeat": {
"count": 3,
"sequence": [templated_device_action("repeat_event")],
}
},
cv.SCRIPT_ACTION_CHOOSE: {
"choose": [
{
"conditions": "{{ states.light.kitchen.state == 'on' }}",
"sequence": [templated_device_action("choose_event")],
}
],
"default": [templated_device_action("choose_default_event")],
},
cv.SCRIPT_ACTION_WAIT_FOR_TRIGGER: {
"wait_for_trigger": [
{"platform": "event", "event_type": "wait_for_trigger_event"}
]
},
cv.SCRIPT_ACTION_VARIABLES: {"variables": {"hello": "world"}},
}
expected_templates = {
cv.SCRIPT_ACTION_CHECK_CONDITION: None,
cv.SCRIPT_ACTION_DEVICE_AUTOMATION: [[]],
cv.SCRIPT_ACTION_REPEAT: [["repeat", "sequence", 0]],
cv.SCRIPT_ACTION_CHOOSE: [["choose", 0, "sequence", 0], ["default", 0]],
cv.SCRIPT_ACTION_WAIT_FOR_TRIGGER: None,
}
for key in cv.ACTION_TYPE_SCHEMAS:
assert key in configs, f"No validate config test found for {key}"
assert key in expected_templates or key in script.STATIC_VALIDATION_ACTION_TYPES
# Verify we raise if we don't know the action type
with patch(
"homeassistant.helpers.config_validation.determine_script_action",
return_value="non-existing",
), pytest.raises(ValueError):
await script.async_validate_action_config(hass, {})
# Verify each action can validate
validated_config = {}
for action_type, config in configs.items():
assert cv.determine_script_action(config) == action_type
try:
validated_config[action_type] = cv.ACTION_TYPE_SCHEMAS[action_type](config)
validated_config[action_type] = await script.async_validate_action_config(
hass, validated_config[action_type]
)
except vol.Invalid as err:
assert False, f"{action_type} config invalid: {err}"
# Verify non-static actions have validated
for action_type, paths_to_templates in expected_templates.items():
if paths_to_templates is None:
continue
for path_to_template in paths_to_templates:
device_action = reduce(
operator.getitem, path_to_template, validated_config[action_type]
)
assert isinstance(device_action["message"], template.Template)
async def test_embedded_wait_for_trigger_in_automation(hass):
"""Test an embedded wait for trigger."""
assert await async_setup_component(
hass,
"automation",
{
"automation": {
"trigger": {"platform": "event", "event_type": "test_event"},
"action": {
"repeat": {
"while": [
{
"condition": "template",
"value_template": '{{ is_state("test.value1", "trigger-while") }}',
}
],
"sequence": [
{"event": "trigger_wait_event"},
{
"wait_for_trigger": [
{
"platform": "template",
"value_template": '{{ is_state("test.value2", "trigger-wait") }}',
}
]
},
{"service": "test.script"},
],
}
},
}
},
)
hass.states.async_set("test.value1", "trigger-while")
hass.states.async_set("test.value2", "not-trigger-wait")
mock_calls = async_mock_service(hass, "test", "script")
async def trigger_wait_event(_):
# give script the time to attach the trigger.
await asyncio.sleep(0)
hass.states.async_set("test.value1", "not-trigger-while")
hass.states.async_set("test.value2", "trigger-wait")
hass.bus.async_listen("trigger_wait_event", trigger_wait_event)
# Start automation
hass.bus.async_fire("test_event")
await hass.async_block_till_done()
assert len(mock_calls) == 1
async def test_breakpoints_1(hass):
"""Test setting a breakpoint halts execution, and execution can be resumed."""
event = "test_event"
events = async_capture_events(hass, event)
sequence = cv.SCRIPT_SCHEMA(
[
{"event": event, "event_data": {"value": 0}}, # Node "0"
{"event": event, "event_data": {"value": 1}}, # Node "1"
{"event": event, "event_data": {"value": 2}}, # Node "2"
{"event": event, "event_data": {"value": 3}}, # Node "3"
{"event": event, "event_data": {"value": 4}}, # Node "4"
{"event": event, "event_data": {"value": 5}}, # Node "5"
{"event": event, "event_data": {"value": 6}}, # Node "6"
{"event": event, "event_data": {"value": 7}}, # Node "7"
]
)
logger = logging.getLogger("TEST")
script_obj = script.Script(
hass,
sequence,
"Test Name",
"test_domain",
script_mode="queued",
max_runs=2,
logger=logger,
)
trace.trace_id_set(("script_1", "1"))
script.breakpoint_set(hass, "script_1", script.RUN_ID_ANY, "1")
script.breakpoint_set(hass, "script_1", script.RUN_ID_ANY, "5")
breakpoint_hit_event = asyncio.Event()
@callback
def breakpoint_hit(*_):
breakpoint_hit_event.set()
async_dispatcher_connect(hass, script.SCRIPT_BREAKPOINT_HIT, breakpoint_hit)
watch_messages = []
@callback
def check_action():
for message, flag in watch_messages:
if script_obj.last_action and message in script_obj.last_action:
flag.set()
script_obj.change_listener = check_action
assert not script_obj.is_running
assert script_obj.runs == 0
# Start script, should stop on breakpoint at node "1"
hass.async_create_task(script_obj.async_run(context=Context()))
await breakpoint_hit_event.wait()
assert script_obj.is_running
assert script_obj.runs == 1
assert len(events) == 1
assert events[-1].data["value"] == 0
# Single step script, should stop at node "2"
breakpoint_hit_event.clear()
script.debug_step(hass, "script_1", "1")
await breakpoint_hit_event.wait()
assert script_obj.is_running
assert script_obj.runs == 1
assert len(events) == 2
assert events[-1].data["value"] == 1
# Single step script, should stop at node "3"
breakpoint_hit_event.clear()
script.debug_step(hass, "script_1", "1")
await breakpoint_hit_event.wait()
assert script_obj.is_running
assert script_obj.runs == 1
assert len(events) == 3
assert events[-1].data["value"] == 2
# Resume script, should stop on breakpoint at node "5"
breakpoint_hit_event.clear()
script.debug_continue(hass, "script_1", "1")
await breakpoint_hit_event.wait()
assert script_obj.is_running
assert script_obj.runs == 1
assert len(events) == 5
assert events[-1].data["value"] == 4
# Resume script, should run until completion
script.debug_continue(hass, "script_1", "1")
await hass.async_block_till_done()
assert not script_obj.is_running
assert script_obj.runs == 0
assert len(events) == 8
assert events[-1].data["value"] == 7
async def test_breakpoints_2(hass):
"""Test setting a breakpoint halts execution, and execution can be aborted."""
event = "test_event"
events = async_capture_events(hass, event)
sequence = cv.SCRIPT_SCHEMA(
[
{"event": event, "event_data": {"value": 0}}, # Node "0"
{"event": event, "event_data": {"value": 1}}, # Node "1"
{"event": event, "event_data": {"value": 2}}, # Node "2"
{"event": event, "event_data": {"value": 3}}, # Node "3"
{"event": event, "event_data": {"value": 4}}, # Node "4"
{"event": event, "event_data": {"value": 5}}, # Node "5"
{"event": event, "event_data": {"value": 6}}, # Node "6"
{"event": event, "event_data": {"value": 7}}, # Node "7"
]
)
logger = logging.getLogger("TEST")
script_obj = script.Script(
hass,
sequence,
"Test Name",
"test_domain",
script_mode="queued",
max_runs=2,
logger=logger,
)
trace.trace_id_set(("script_1", "1"))
script.breakpoint_set(hass, "script_1", script.RUN_ID_ANY, "1")
script.breakpoint_set(hass, "script_1", script.RUN_ID_ANY, "5")
breakpoint_hit_event = asyncio.Event()
@callback
def breakpoint_hit(*_):
breakpoint_hit_event.set()
async_dispatcher_connect(hass, script.SCRIPT_BREAKPOINT_HIT, breakpoint_hit)
watch_messages = []
@callback
def check_action():
for message, flag in watch_messages:
if script_obj.last_action and message in script_obj.last_action:
flag.set()
script_obj.change_listener = check_action
assert not script_obj.is_running
assert script_obj.runs == 0
# Start script, should stop on breakpoint at node "1"
hass.async_create_task(script_obj.async_run(context=Context()))
await breakpoint_hit_event.wait()
assert script_obj.is_running
assert script_obj.runs == 1
assert len(events) == 1
assert events[-1].data["value"] == 0
# Abort script
script.debug_stop(hass, "script_1", "1")
await hass.async_block_till_done()
assert not script_obj.is_running
assert script_obj.runs == 0
assert len(events) == 1
async def test_platform_async_validate_action_config(hass):
"""Test platform.async_validate_action_config will be called if it exists."""
config = {CONF_DEVICE_ID: "test", CONF_DOMAIN: "test"}
platform = AsyncMock()
with patch(
"homeassistant.components.device_automation.action.async_get_device_automation_platform",
return_value=platform,
):
platform.async_validate_action_config.return_value = config
await script.async_validate_action_config(hass, config)
platform.async_validate_action_config.assert_awaited()
|
{
"content_hash": "fc29ce9d1e68fdbcd5a402d19f745e50",
"timestamp": "",
"source": "github",
"line_count": 3729,
"max_line_length": 106,
"avg_line_length": 33.55993563958166,
"alnum_prop": 0.5141875424507572,
"repo_name": "rohitranjan1991/home-assistant",
"id": "11ba9810b9dffd9162c71af1959a7b78337ac66d",
"size": "125145",
"binary": false,
"copies": "2",
"ref": "refs/heads/dev",
"path": "tests/helpers/test_script.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "1017265"
},
{
"name": "Python",
"bytes": "1051086"
},
{
"name": "Shell",
"bytes": "3946"
}
],
"symlink_target": ""
}
|
"""The tests for the Home Assistant HTTP component."""
# pylint: disable=protected-access
from ipaddress import ip_address
import os
from unittest.mock import Mock, mock_open, patch
from aiohttp import web
from aiohttp.web_exceptions import HTTPUnauthorized
from aiohttp.web_middlewares import middleware
import pytest
import homeassistant.components.http as http
from homeassistant.components.http import KEY_AUTHENTICATED
from homeassistant.components.http.ban import (
IP_BANS_FILE,
KEY_BANNED_IPS,
KEY_FAILED_LOGIN_ATTEMPTS,
IpBan,
setup_bans,
)
from homeassistant.components.http.view import request_handler_factory
from homeassistant.const import HTTP_FORBIDDEN
from homeassistant.setup import async_setup_component
from . import mock_real_ip
from tests.common import async_mock_service
SUPERVISOR_IP = "1.2.3.4"
BANNED_IPS = ["200.201.202.203", "100.64.0.2"]
BANNED_IPS_WITH_SUPERVISOR = BANNED_IPS + [SUPERVISOR_IP]
@pytest.fixture(name="hassio_env")
def hassio_env_fixture():
"""Fixture to inject hassio env."""
with patch.dict(os.environ, {"HASSIO": "127.0.0.1"}), patch(
"homeassistant.components.hassio.HassIO.is_connected",
return_value={"result": "ok", "data": {}},
), patch.dict(os.environ, {"HASSIO_TOKEN": "123456"}):
yield
@pytest.fixture(autouse=True)
def gethostbyaddr_mock():
"""Fixture to mock out I/O on getting host by address."""
with patch(
"homeassistant.components.http.ban.gethostbyaddr",
return_value=("example.com", ["0.0.0.0.in-addr.arpa"], ["0.0.0.0"]),
):
yield
async def test_access_from_banned_ip(hass, aiohttp_client):
"""Test accessing to server from banned IP. Both trusted and not."""
app = web.Application()
app["hass"] = hass
setup_bans(hass, app, 5)
set_real_ip = mock_real_ip(app)
with patch(
"homeassistant.components.http.ban.async_load_ip_bans_config",
return_value=[IpBan(banned_ip) for banned_ip in BANNED_IPS],
):
client = await aiohttp_client(app)
for remote_addr in BANNED_IPS:
set_real_ip(remote_addr)
resp = await client.get("/")
assert resp.status == HTTP_FORBIDDEN
@pytest.mark.parametrize(
"remote_addr, bans, status",
list(
zip(
BANNED_IPS_WITH_SUPERVISOR, [1, 1, 0], [HTTP_FORBIDDEN, HTTP_FORBIDDEN, 401]
)
),
)
async def test_access_from_supervisor_ip(
remote_addr, bans, status, hass, aiohttp_client, hassio_env
):
"""Test accessing to server from supervisor IP."""
app = web.Application()
app["hass"] = hass
async def unauth_handler(request):
"""Return a mock web response."""
raise HTTPUnauthorized
app.router.add_get("/", unauth_handler)
setup_bans(hass, app, 1)
mock_real_ip(app)(remote_addr)
with patch(
"homeassistant.components.http.ban.async_load_ip_bans_config", return_value=[]
):
client = await aiohttp_client(app)
assert await async_setup_component(hass, "hassio", {"hassio": {}})
m_open = mock_open()
with patch.dict(os.environ, {"SUPERVISOR": SUPERVISOR_IP}), patch(
"homeassistant.components.http.ban.open", m_open, create=True
):
resp = await client.get("/")
assert resp.status == 401
assert len(app[KEY_BANNED_IPS]) == bans
assert m_open.call_count == bans
# second request should be forbidden if banned
resp = await client.get("/")
assert resp.status == status
assert len(app[KEY_BANNED_IPS]) == bans
async def test_ban_middleware_not_loaded_by_config(hass):
"""Test accessing to server from banned IP when feature is off."""
with patch("homeassistant.components.http.setup_bans") as mock_setup:
await async_setup_component(
hass, "http", {"http": {http.CONF_IP_BAN_ENABLED: False}}
)
assert len(mock_setup.mock_calls) == 0
async def test_ban_middleware_loaded_by_default(hass):
"""Test accessing to server from banned IP when feature is off."""
with patch("homeassistant.components.http.setup_bans") as mock_setup:
await async_setup_component(hass, "http", {"http": {}})
assert len(mock_setup.mock_calls) == 1
async def test_ip_bans_file_creation(hass, aiohttp_client):
"""Testing if banned IP file created."""
notification_calls = async_mock_service(hass, "persistent_notification", "create")
app = web.Application()
app["hass"] = hass
async def unauth_handler(request):
"""Return a mock web response."""
raise HTTPUnauthorized
app.router.add_get("/", unauth_handler)
setup_bans(hass, app, 2)
mock_real_ip(app)("200.201.202.204")
with patch(
"homeassistant.components.http.ban.async_load_ip_bans_config",
return_value=[IpBan(banned_ip) for banned_ip in BANNED_IPS],
):
client = await aiohttp_client(app)
m_open = mock_open()
with patch("homeassistant.components.http.ban.open", m_open, create=True):
resp = await client.get("/")
assert resp.status == 401
assert len(app[KEY_BANNED_IPS]) == len(BANNED_IPS)
assert m_open.call_count == 0
resp = await client.get("/")
assert resp.status == 401
assert len(app[KEY_BANNED_IPS]) == len(BANNED_IPS) + 1
m_open.assert_called_once_with(hass.config.path(IP_BANS_FILE), "a")
resp = await client.get("/")
assert resp.status == HTTP_FORBIDDEN
assert m_open.call_count == 1
assert len(notification_calls) == 3
assert (
notification_calls[0].data["message"]
== "Login attempt or request with invalid authentication from example.com (200.201.202.204). See the log for details."
)
async def test_failed_login_attempts_counter(hass, aiohttp_client):
"""Testing if failed login attempts counter increased."""
app = web.Application()
app["hass"] = hass
async def auth_handler(request):
"""Return 200 status code."""
return None, 200
app.router.add_get(
"/auth_true", request_handler_factory(Mock(requires_auth=True), auth_handler)
)
app.router.add_get(
"/auth_false", request_handler_factory(Mock(requires_auth=True), auth_handler)
)
app.router.add_get(
"/", request_handler_factory(Mock(requires_auth=False), auth_handler)
)
setup_bans(hass, app, 5)
remote_ip = ip_address("200.201.202.204")
mock_real_ip(app)("200.201.202.204")
@middleware
async def mock_auth(request, handler):
"""Mock auth middleware."""
if "auth_true" in request.path:
request[KEY_AUTHENTICATED] = True
else:
request[KEY_AUTHENTICATED] = False
return await handler(request)
app.middlewares.append(mock_auth)
client = await aiohttp_client(app)
resp = await client.get("/auth_false")
assert resp.status == 401
assert app[KEY_FAILED_LOGIN_ATTEMPTS][remote_ip] == 1
resp = await client.get("/auth_false")
assert resp.status == 401
assert app[KEY_FAILED_LOGIN_ATTEMPTS][remote_ip] == 2
resp = await client.get("/")
assert resp.status == 200
assert app[KEY_FAILED_LOGIN_ATTEMPTS][remote_ip] == 2
# This used to check that with trusted networks we reset login attempts
# We no longer support trusted networks.
resp = await client.get("/auth_true")
assert resp.status == 200
assert app[KEY_FAILED_LOGIN_ATTEMPTS][remote_ip] == 2
|
{
"content_hash": "14792318d4e0b2e53393c7e65b10ebba",
"timestamp": "",
"source": "github",
"line_count": 234,
"max_line_length": 130,
"avg_line_length": 32.18376068376068,
"alnum_prop": 0.6495817288540698,
"repo_name": "turbokongen/home-assistant",
"id": "717bd9564c0220286f11ef8d33699efb807c2708",
"size": "7531",
"binary": false,
"copies": "5",
"ref": "refs/heads/dev",
"path": "tests/components/http/test_ban.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1720"
},
{
"name": "Python",
"bytes": "30405146"
},
{
"name": "Shell",
"bytes": "4832"
}
],
"symlink_target": ""
}
|
"""Python I/O functions.
:author: jao <jao@bigml.com>
:date: Wed Apr 08, 2015-2022 17:52
"""
import csv
class UnicodeReader():
"""Adapter to read files
"""
def __init__(self, filename, dialect=csv.excel,
encoding="utf-8", **kwargs):
"""Constructor method for the reader
"""
self.filename = filename
self.dialect = dialect
self.encoding = encoding
self.kwargs = kwargs
self.file_handler = None
self.reader = None
def open_reader(self):
"""Opening the file
"""
if self.filename.__class__.__name__ == 'UTF8Recoder':
self.file_handler = self.filename
else:
self.file_handler = open(self.filename, 'rt',
encoding=self.encoding, newline='')
self.reader = csv.reader(self.file_handler, dialect=self.dialect,
**self.kwargs)
return self
def __enter__(self):
"""Opening files
"""
return self.open_reader()
def __exit__(self, ftype, value, traceback):
"""Closing on exit
"""
self.close_reader()
def __next__(self):
"""Reading records
"""
return next(self.reader)
def __iter__(self):
"""Iterator
"""
return self
def close_reader(self):
"""Closing the file
"""
        if self.filename.__class__.__name__ != 'UTF8Recoder':
self.file_handler.close()
class UnicodeWriter():
"""Adapter to write files
"""
def __init__(self, filename, dialect=csv.excel,
encoding="utf-8", **kwargs):
"""Constructor method for the writer
"""
self.filename = filename
self.dialect = dialect
self.encoding = encoding
self.kwargs = kwargs
self.file_handler = None
self.writer = None
def open_writer(self):
"""Opening the file
"""
self.file_handler = open(self.filename, 'wt',
encoding=self.encoding, newline='')
self.writer = csv.writer(self.file_handler, dialect=self.dialect,
**self.kwargs)
return self
def close_writer(self):
"""Closing the file
"""
self.file_handler.close()
def __enter__(self):
"""Opening the file
"""
return self.open_writer()
def __exit__(self, ftype, value, traceback):
"""Closing on exit
"""
self.close_writer()
def writerow(self, row):
"""Writer emulating CSV writerow
"""
self.writer.writerow(row)
def writerows(self, rows):
"""Writer emulating CSV writerows
"""
for row in rows:
self.writerow(row)
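# --- Editor's usage sketch (not part of the original module) ---
# A minimal, hedged example of the two adapters above: stream every row
# of one CSV into another through the context-manager interface. The
# file names here are hypothetical.
if __name__ == "__main__":
    with UnicodeReader("input.csv") as reader:
        with UnicodeWriter("output.csv") as writer:
            # writerows() iterates the reader and copies each record
            writer.writerows(reader)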
|
{
"content_hash": "4884201aa81aaf1f2c53c87440d1480b",
"timestamp": "",
"source": "github",
"line_count": 127,
"max_line_length": 73,
"avg_line_length": 22.566929133858267,
"alnum_prop": 0.5129099790648988,
"repo_name": "bigmlcom/python",
"id": "c3856e24787b0d9c93e18d9dbc9d3462d560c118",
"size": "3501",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "bigml/io.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "1799772"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [("organisations", "0011_organisationdivision_seats_total")]
operations = [
migrations.CreateModel(
name="OrganisationDivisionSet",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("start_date", models.DateField(null=True)),
("end_date", models.DateField(null=True)),
(
"organisation",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name="divisionset",
to="organisations.Organisation",
),
),
],
),
migrations.AlterModelOptions(
name="organisationdivision", options={"ordering": ("name",)}
),
migrations.RemoveField(model_name="organisationdivision", name="end_date"),
migrations.RemoveField(model_name="organisationdivision", name="start_date"),
migrations.AlterField(
model_name="organisationdivision",
name="organisation",
field=models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name="divisions",
to="organisations.Organisation",
),
),
migrations.AddField(
model_name="organisationdivision",
name="divisionset",
field=models.ForeignKey(
null=True,
on_delete=django.db.models.deletion.CASCADE,
related_name="divisions",
to="organisations.OrganisationDivisionSet",
),
),
]
|
{
"content_hash": "3870bd96b8e01c20d130debef5f7c28d",
"timestamp": "",
"source": "github",
"line_count": 60,
"max_line_length": 85,
"avg_line_length": 34.85,
"alnum_prop": 0.501195600191296,
"repo_name": "DemocracyClub/EveryElection",
"id": "74799ff92178b4cc3a52eaeee947b225a6cee187",
"size": "2164",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "every_election/apps/organisations/migrations/0012_auto_20170111_1441.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "37294"
},
{
"name": "JavaScript",
"bytes": "3930"
},
{
"name": "Python",
"bytes": "548734"
},
{
"name": "SCSS",
"bytes": "3314"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
import datetime
import unittest
from django.test import TransactionTestCase
from django.db import connection, DatabaseError, IntegrityError, OperationalError
from django.db.models.fields import IntegerField, TextField, CharField, SlugField, BooleanField, BinaryField
from django.db.models.fields.related import ManyToManyField, ForeignKey
from django.db.transaction import atomic
from .models import (Author, AuthorWithM2M, Book, BookWithLongName,
BookWithSlug, BookWithM2M, Tag, TagIndexed, TagM2MTest, TagUniqueRename,
UniqueTest, Thing, TagThrough, BookWithM2MThrough, AuthorTag, AuthorWithM2MThrough,
AuthorWithEvenLongerName)
class SchemaTests(TransactionTestCase):
"""
Tests that the schema-alteration code works correctly.
Be aware that these tests are more liable than most to false results,
as sometimes the code to check if a test has worked is almost as complex
as the code it is testing.
"""
available_apps = []
models = [
Author, AuthorWithM2M, Book, BookWithLongName, BookWithSlug,
BookWithM2M, Tag, TagIndexed, TagM2MTest, TagUniqueRename, UniqueTest,
Thing, TagThrough, BookWithM2MThrough, AuthorWithEvenLongerName
]
# Utility functions
def tearDown(self):
# Delete any tables made for our models
self.delete_tables()
def delete_tables(self):
"Deletes all model tables for our models for a clean test environment"
with connection.cursor() as cursor:
connection.disable_constraint_checking()
table_names = connection.introspection.table_names(cursor)
for model in self.models:
# Remove any M2M tables first
for field in model._meta.local_many_to_many:
with atomic():
tbl = field.rel.through._meta.db_table
if tbl in table_names:
cursor.execute(connection.schema_editor().sql_delete_table % {
"table": connection.ops.quote_name(tbl),
})
table_names.remove(tbl)
# Then remove the main tables
with atomic():
tbl = model._meta.db_table
if tbl in table_names:
cursor.execute(connection.schema_editor().sql_delete_table % {
"table": connection.ops.quote_name(tbl),
})
table_names.remove(tbl)
connection.enable_constraint_checking()
def column_classes(self, model):
with connection.cursor() as cursor:
columns = dict(
(d[0], (connection.introspection.get_field_type(d[1], d), d))
for d in connection.introspection.get_table_description(
cursor,
model._meta.db_table,
)
)
# SQLite has a different format for field_type
for name, (type, desc) in columns.items():
if isinstance(type, tuple):
columns[name] = (type[0], desc)
            # SQLite doesn't raise an error for a missing table; it just
            # returns no columns, so detect that case explicitly
if not columns:
raise DatabaseError("Table does not exist (empty pragma)")
return columns
def get_indexes(self, table):
"""
Get the indexes on the table using a new cursor.
"""
with connection.cursor() as cursor:
return connection.introspection.get_indexes(cursor, table)
def get_constraints(self, table):
"""
Get the constraints on a table using a new cursor.
"""
with connection.cursor() as cursor:
return connection.introspection.get_constraints(cursor, table)
# Tests
def test_creation_deletion(self):
"""
Tries creating a model's table, and then deleting it.
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
# Check that it's there
list(Author.objects.all())
# Clean up that table
with connection.schema_editor() as editor:
editor.delete_model(Author)
# Check that it's gone
self.assertRaises(
DatabaseError,
lambda: list(Author.objects.all()),
)
@unittest.skipUnless(connection.features.supports_foreign_keys, "No FK support")
def test_fk(self):
"Tests that creating tables out of FK order, then repointing, works"
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Book)
editor.create_model(Author)
editor.create_model(Tag)
# Check that initial tables are there
list(Author.objects.all())
list(Book.objects.all())
# Make sure the FK constraint is present
with self.assertRaises(IntegrityError):
Book.objects.create(
author_id=1,
title="Much Ado About Foreign Keys",
pub_date=datetime.datetime.now(),
)
# Repoint the FK constraint
new_field = ForeignKey(Tag)
new_field.set_attributes_from_name("author")
with connection.schema_editor() as editor:
editor.alter_field(
Book,
Book._meta.get_field_by_name("author")[0],
new_field,
strict=True,
)
# Make sure the new FK constraint is present
constraints = self.get_constraints(Book._meta.db_table)
for name, details in constraints.items():
if details['columns'] == ["author_id"] and details['foreign_key']:
self.assertEqual(details['foreign_key'], ('schema_tag', 'id'))
break
else:
self.fail("No FK constraint for author_id found")
def test_add_field(self):
"""
Tests adding fields to models
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
# Ensure there's no age field
columns = self.column_classes(Author)
self.assertNotIn("age", columns)
# Add the new field
new_field = IntegerField(null=True)
new_field.set_attributes_from_name("age")
with connection.schema_editor() as editor:
editor.add_field(
Author,
new_field,
)
# Ensure the field is right afterwards
columns = self.column_classes(Author)
self.assertEqual(columns['age'][0], "IntegerField")
self.assertEqual(columns['age'][1][6], True)
def test_add_field_temp_default(self):
"""
Tests adding fields to models with a temporary default
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
# Ensure there's no age field
columns = self.column_classes(Author)
self.assertNotIn("age", columns)
# Add some rows of data
Author.objects.create(name="Andrew", height=30)
Author.objects.create(name="Andrea")
# Add a not-null field
new_field = CharField(max_length=30, default="Godwin")
new_field.set_attributes_from_name("surname")
with connection.schema_editor() as editor:
editor.add_field(
Author,
new_field,
)
# Ensure the field is right afterwards
columns = self.column_classes(Author)
self.assertEqual(columns['surname'][0], "CharField")
self.assertEqual(columns['surname'][1][6],
connection.features.interprets_empty_strings_as_nulls)
def test_add_field_temp_default_boolean(self):
"""
Tests adding fields to models with a temporary default where
the default is False. (#21783)
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
# Ensure there's no age field
columns = self.column_classes(Author)
self.assertNotIn("age", columns)
# Add some rows of data
Author.objects.create(name="Andrew", height=30)
Author.objects.create(name="Andrea")
# Add a not-null field
new_field = BooleanField(default=False)
new_field.set_attributes_from_name("awesome")
with connection.schema_editor() as editor:
editor.add_field(
Author,
new_field,
)
# Ensure the field is right afterwards
columns = self.column_classes(Author)
# BooleanField are stored as TINYINT(1) on MySQL.
field_type, field_info = columns['awesome']
if connection.vendor == 'mysql':
self.assertEqual(field_type, 'IntegerField')
self.assertEqual(field_info.precision, 1)
else:
self.assertEqual(field_type, 'BooleanField')
def test_add_field_default_transform(self):
"""
Tests adding fields to models with a default that is not directly
valid in the database (#22581)
"""
class TestTransformField(IntegerField):
# Weird field that saves the count of items in its value
def get_default(self):
return self.default
def get_prep_value(self, value):
if value is None:
return 0
return len(value)
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
# Add some rows of data
Author.objects.create(name="Andrew", height=30)
Author.objects.create(name="Andrea")
# Add the field with a default it needs to cast (to string in this case)
new_field = TestTransformField(default={1: 2})
new_field.set_attributes_from_name("thing")
with connection.schema_editor() as editor:
editor.add_field(
Author,
new_field,
)
# Ensure the field is there
columns = self.column_classes(Author)
field_type, field_info = columns['thing']
self.assertEqual(field_type, 'IntegerField')
# Make sure the values were transformed correctly
self.assertEqual(Author.objects.extra(where=["thing = 1"]).count(), 2)
def test_add_field_binary(self):
"""
Tests binary fields get a sane default (#22851)
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
# Add the new field
new_field = BinaryField(blank=True)
new_field.set_attributes_from_name("bits")
with connection.schema_editor() as editor:
editor.add_field(
Author,
new_field,
)
# Ensure the field is right afterwards
columns = self.column_classes(Author)
# MySQL annoyingly uses the same backend, so it'll come back as one of
# these two types.
self.assertIn(columns['bits'][0], ("BinaryField", "TextField"))
def test_alter(self):
"""
Tests simple altering of fields
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
# Ensure the field is right to begin with
columns = self.column_classes(Author)
self.assertEqual(columns['name'][0], "CharField")
self.assertEqual(bool(columns['name'][1][6]), bool(connection.features.interprets_empty_strings_as_nulls))
# Alter the name field to a TextField
new_field = TextField(null=True)
new_field.set_attributes_from_name("name")
with connection.schema_editor() as editor:
editor.alter_field(
Author,
Author._meta.get_field_by_name("name")[0],
new_field,
strict=True,
)
# Ensure the field is right afterwards
columns = self.column_classes(Author)
self.assertEqual(columns['name'][0], "TextField")
self.assertEqual(columns['name'][1][6], True)
# Change nullability again
new_field2 = TextField(null=False)
new_field2.set_attributes_from_name("name")
with connection.schema_editor() as editor:
editor.alter_field(
Author,
new_field,
new_field2,
strict=True,
)
# Ensure the field is right afterwards
columns = self.column_classes(Author)
self.assertEqual(columns['name'][0], "TextField")
self.assertEqual(bool(columns['name'][1][6]), False)
@unittest.skipUnless(connection.features.supports_foreign_keys, "No FK support")
def test_alter_fk(self):
"""
Tests altering of FKs
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.create_model(Book)
# Ensure the field is right to begin with
columns = self.column_classes(Book)
self.assertEqual(columns['author_id'][0], "IntegerField")
# Make sure the FK constraint is present
constraints = self.get_constraints(Book._meta.db_table)
for name, details in constraints.items():
if details['columns'] == ["author_id"] and details['foreign_key']:
self.assertEqual(details['foreign_key'], ('schema_author', 'id'))
break
else:
self.fail("No FK constraint for author_id found")
# Alter the FK
new_field = ForeignKey(Author, editable=False)
new_field.set_attributes_from_name("author")
with connection.schema_editor() as editor:
editor.alter_field(
Book,
Book._meta.get_field_by_name("author")[0],
new_field,
strict=True,
)
# Ensure the field is right afterwards
columns = self.column_classes(Book)
self.assertEqual(columns['author_id'][0], "IntegerField")
# Make sure the FK constraint is present
constraints = self.get_constraints(Book._meta.db_table)
for name, details in constraints.items():
if details['columns'] == ["author_id"] and details['foreign_key']:
self.assertEqual(details['foreign_key'], ('schema_author', 'id'))
break
else:
self.fail("No FK constraint for author_id found")
def test_rename(self):
"""
        Tests simple renaming of fields
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
# Ensure the field is right to begin with
columns = self.column_classes(Author)
self.assertEqual(columns['name'][0], "CharField")
self.assertNotIn("display_name", columns)
# Alter the name field's name
new_field = CharField(max_length=254)
new_field.set_attributes_from_name("display_name")
with connection.schema_editor() as editor:
editor.alter_field(
Author,
Author._meta.get_field_by_name("name")[0],
new_field,
strict=True,
)
# Ensure the field is right afterwards
columns = self.column_classes(Author)
self.assertEqual(columns['display_name'][0], "CharField")
self.assertNotIn("name", columns)
def test_m2m_create(self):
"""
Tests M2M fields on models during creation
"""
# Create the tables
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.create_model(TagM2MTest)
editor.create_model(BookWithM2M)
# Ensure there is now an m2m table there
columns = self.column_classes(BookWithM2M._meta.get_field_by_name("tags")[0].rel.through)
self.assertEqual(columns['tagm2mtest_id'][0], "IntegerField")
def test_m2m_create_through(self):
"""
Tests M2M fields on models during creation with through models
"""
# Create the tables
with connection.schema_editor() as editor:
editor.create_model(TagThrough)
editor.create_model(TagM2MTest)
editor.create_model(BookWithM2MThrough)
# Ensure there is now an m2m table there
columns = self.column_classes(TagThrough)
self.assertEqual(columns['book_id'][0], "IntegerField")
self.assertEqual(columns['tag_id'][0], "IntegerField")
def test_m2m(self):
"""
Tests adding/removing M2M fields on models
"""
# Create the tables
with connection.schema_editor() as editor:
editor.create_model(AuthorWithM2M)
editor.create_model(TagM2MTest)
# Create an M2M field
new_field = ManyToManyField("schema.TagM2MTest", related_name="authors")
new_field.contribute_to_class(AuthorWithM2M, "tags")
try:
# Ensure there's no m2m table there
self.assertRaises(DatabaseError, self.column_classes, new_field.rel.through)
# Add the field
with connection.schema_editor() as editor:
editor.add_field(
Author,
new_field,
)
# Ensure there is now an m2m table there
columns = self.column_classes(new_field.rel.through)
self.assertEqual(columns['tagm2mtest_id'][0], "IntegerField")
# "Alter" the field. This should not rename the DB table to itself.
with connection.schema_editor() as editor:
editor.alter_field(
Author,
new_field,
new_field,
)
# Remove the M2M table again
with connection.schema_editor() as editor:
editor.remove_field(
Author,
new_field,
)
# Ensure there's no m2m table there
self.assertRaises(DatabaseError, self.column_classes, new_field.rel.through)
finally:
# Cleanup model states
AuthorWithM2M._meta.local_many_to_many.remove(new_field)
def test_m2m_through_alter(self):
"""
Tests altering M2Ms with explicit through models (should no-op)
"""
# Create the tables
with connection.schema_editor() as editor:
editor.create_model(AuthorTag)
editor.create_model(AuthorWithM2MThrough)
editor.create_model(TagM2MTest)
# Ensure the m2m table is there
self.assertEqual(len(self.column_classes(AuthorTag)), 3)
# "Alter" the field's blankness. This should not actually do anything.
with connection.schema_editor() as editor:
old_field = AuthorWithM2MThrough._meta.get_field_by_name("tags")[0]
new_field = ManyToManyField("schema.TagM2MTest", related_name="authors", through="AuthorTag")
new_field.contribute_to_class(AuthorWithM2MThrough, "tags")
editor.alter_field(
Author,
old_field,
new_field,
)
# Ensure the m2m table is still there
self.assertEqual(len(self.column_classes(AuthorTag)), 3)
def test_m2m_repoint(self):
"""
Tests repointing M2M fields
"""
# Create the tables
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.create_model(BookWithM2M)
editor.create_model(TagM2MTest)
editor.create_model(UniqueTest)
# Ensure the M2M exists and points to TagM2MTest
constraints = self.get_constraints(BookWithM2M._meta.get_field_by_name("tags")[0].rel.through._meta.db_table)
if connection.features.supports_foreign_keys:
for name, details in constraints.items():
if details['columns'] == ["tagm2mtest_id"] and details['foreign_key']:
self.assertEqual(details['foreign_key'], ('schema_tagm2mtest', 'id'))
break
else:
self.fail("No FK constraint for tagm2mtest_id found")
# Repoint the M2M
new_field = ManyToManyField(UniqueTest)
new_field.contribute_to_class(BookWithM2M, "uniques")
try:
with connection.schema_editor() as editor:
editor.alter_field(
Author,
BookWithM2M._meta.get_field_by_name("tags")[0],
new_field,
)
# Ensure old M2M is gone
self.assertRaises(DatabaseError, self.column_classes, BookWithM2M._meta.get_field_by_name("tags")[0].rel.through)
# Ensure the new M2M exists and points to UniqueTest
constraints = self.get_constraints(new_field.rel.through._meta.db_table)
if connection.features.supports_foreign_keys:
for name, details in constraints.items():
if details['columns'] == ["uniquetest_id"] and details['foreign_key']:
self.assertEqual(details['foreign_key'], ('schema_uniquetest', 'id'))
break
else:
self.fail("No FK constraint for uniquetest_id found")
finally:
# Cleanup through table separately
with connection.schema_editor() as editor:
editor.remove_field(BookWithM2M, BookWithM2M._meta.get_field_by_name("uniques")[0])
# Cleanup model states
BookWithM2M._meta.local_many_to_many.remove(new_field)
del BookWithM2M._meta._m2m_cache
@unittest.skipUnless(connection.features.supports_column_check_constraints, "No check constraints")
def test_check_constraints(self):
"""
Tests creating/deleting CHECK constraints
"""
# Create the tables
with connection.schema_editor() as editor:
editor.create_model(Author)
# Ensure the constraint exists
constraints = self.get_constraints(Author._meta.db_table)
for name, details in constraints.items():
if details['columns'] == ["height"] and details['check']:
break
else:
self.fail("No check constraint for height found")
# Alter the column to remove it
new_field = IntegerField(null=True, blank=True)
new_field.set_attributes_from_name("height")
with connection.schema_editor() as editor:
editor.alter_field(
Author,
Author._meta.get_field_by_name("height")[0],
new_field,
strict=True,
)
constraints = self.get_constraints(Author._meta.db_table)
for name, details in constraints.items():
if details['columns'] == ["height"] and details['check']:
self.fail("Check constraint for height found")
# Alter the column to re-add it
with connection.schema_editor() as editor:
editor.alter_field(
Author,
new_field,
Author._meta.get_field_by_name("height")[0],
strict=True,
)
constraints = self.get_constraints(Author._meta.db_table)
for name, details in constraints.items():
if details['columns'] == ["height"] and details['check']:
break
else:
self.fail("No check constraint for height found")
def test_unique(self):
"""
Tests removing and adding unique constraints to a single column.
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Tag)
# Ensure the field is unique to begin with
Tag.objects.create(title="foo", slug="foo")
self.assertRaises(IntegrityError, Tag.objects.create, title="bar", slug="foo")
Tag.objects.all().delete()
# Alter the slug field to be non-unique
new_field = SlugField(unique=False)
new_field.set_attributes_from_name("slug")
with connection.schema_editor() as editor:
editor.alter_field(
Tag,
Tag._meta.get_field_by_name("slug")[0],
new_field,
strict=True,
)
# Ensure the field is no longer unique
Tag.objects.create(title="foo", slug="foo")
Tag.objects.create(title="bar", slug="foo")
Tag.objects.all().delete()
# Alter the slug field to be unique
new_new_field = SlugField(unique=True)
new_new_field.set_attributes_from_name("slug")
with connection.schema_editor() as editor:
editor.alter_field(
Tag,
new_field,
new_new_field,
strict=True,
)
# Ensure the field is unique again
Tag.objects.create(title="foo", slug="foo")
self.assertRaises(IntegrityError, Tag.objects.create, title="bar", slug="foo")
Tag.objects.all().delete()
# Rename the field
new_field = SlugField(unique=False)
new_field.set_attributes_from_name("slug2")
with connection.schema_editor() as editor:
editor.alter_field(
Tag,
Tag._meta.get_field_by_name("slug")[0],
TagUniqueRename._meta.get_field_by_name("slug2")[0],
strict=True,
)
# Ensure the field is still unique
TagUniqueRename.objects.create(title="foo", slug2="foo")
self.assertRaises(IntegrityError, TagUniqueRename.objects.create, title="bar", slug2="foo")
Tag.objects.all().delete()
def test_unique_together(self):
"""
Tests removing and adding unique_together constraints on a model.
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(UniqueTest)
# Ensure the fields are unique to begin with
UniqueTest.objects.create(year=2012, slug="foo")
UniqueTest.objects.create(year=2011, slug="foo")
UniqueTest.objects.create(year=2011, slug="bar")
self.assertRaises(IntegrityError, UniqueTest.objects.create, year=2012, slug="foo")
UniqueTest.objects.all().delete()
# Alter the model to its non-unique-together companion
with connection.schema_editor() as editor:
editor.alter_unique_together(
UniqueTest,
UniqueTest._meta.unique_together,
[],
)
# Ensure the fields are no longer unique
UniqueTest.objects.create(year=2012, slug="foo")
UniqueTest.objects.create(year=2012, slug="foo")
UniqueTest.objects.all().delete()
# Alter it back
new_new_field = SlugField(unique=True)
new_new_field.set_attributes_from_name("slug")
with connection.schema_editor() as editor:
editor.alter_unique_together(
UniqueTest,
[],
UniqueTest._meta.unique_together,
)
# Ensure the fields are unique again
UniqueTest.objects.create(year=2012, slug="foo")
self.assertRaises(IntegrityError, UniqueTest.objects.create, year=2012, slug="foo")
UniqueTest.objects.all().delete()
def test_index_together(self):
"""
Tests removing and adding index_together constraints on a model.
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Tag)
        # Ensure there's no index on the slug/title columns first
self.assertEqual(
False,
any(
c["index"]
for c in self.get_constraints("schema_tag").values()
if c['columns'] == ["slug", "title"]
),
)
# Alter the model to add an index
with connection.schema_editor() as editor:
editor.alter_index_together(
Tag,
[],
[("slug", "title")],
)
# Ensure there is now an index
self.assertEqual(
True,
any(
c["index"]
for c in self.get_constraints("schema_tag").values()
if c['columns'] == ["slug", "title"]
),
)
# Alter it back
new_new_field = SlugField(unique=True)
new_new_field.set_attributes_from_name("slug")
with connection.schema_editor() as editor:
editor.alter_index_together(
Tag,
[("slug", "title")],
[],
)
# Ensure there's no index
self.assertEqual(
False,
any(
c["index"]
for c in self.get_constraints("schema_tag").values()
if c['columns'] == ["slug", "title"]
),
)
def test_create_index_together(self):
"""
Tests creating models with index_together already defined
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(TagIndexed)
# Ensure there is an index
self.assertEqual(
True,
any(
c["index"]
for c in self.get_constraints("schema_tagindexed").values()
if c['columns'] == ["slug", "title"]
),
)
def test_db_table(self):
"""
Tests renaming of the table
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
# Ensure the table is there to begin with
columns = self.column_classes(Author)
self.assertEqual(columns['name'][0], "CharField")
# Alter the table
with connection.schema_editor() as editor:
editor.alter_db_table(
Author,
"schema_author",
"schema_otherauthor",
)
# Ensure the table is there afterwards
Author._meta.db_table = "schema_otherauthor"
columns = self.column_classes(Author)
self.assertEqual(columns['name'][0], "CharField")
# Alter the table again
with connection.schema_editor() as editor:
editor.alter_db_table(
Author,
"schema_otherauthor",
"schema_author",
)
# Ensure the table is still there
Author._meta.db_table = "schema_author"
columns = self.column_classes(Author)
self.assertEqual(columns['name'][0], "CharField")
def test_indexes(self):
"""
Tests creation/altering of indexes
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.create_model(Book)
# Ensure the table is there and has the right index
self.assertIn(
"title",
self.get_indexes(Book._meta.db_table),
)
# Alter to remove the index
new_field = CharField(max_length=100, db_index=False)
new_field.set_attributes_from_name("title")
with connection.schema_editor() as editor:
editor.alter_field(
Book,
Book._meta.get_field_by_name("title")[0],
new_field,
strict=True,
)
# Ensure the table is there and has no index
self.assertNotIn(
"title",
self.get_indexes(Book._meta.db_table),
)
# Alter to re-add the index
with connection.schema_editor() as editor:
editor.alter_field(
Book,
new_field,
Book._meta.get_field_by_name("title")[0],
strict=True,
)
# Ensure the table is there and has the index again
self.assertIn(
"title",
self.get_indexes(Book._meta.db_table),
)
        # Add a unique column, verify that it creates an implicit index
with connection.schema_editor() as editor:
editor.add_field(
Book,
BookWithSlug._meta.get_field_by_name("slug")[0],
)
self.assertIn(
"slug",
self.get_indexes(Book._meta.db_table),
)
# Remove the unique, check the index goes with it
new_field2 = CharField(max_length=20, unique=False)
new_field2.set_attributes_from_name("slug")
with connection.schema_editor() as editor:
editor.alter_field(
BookWithSlug,
BookWithSlug._meta.get_field_by_name("slug")[0],
new_field2,
strict=True,
)
self.assertNotIn(
"slug",
self.get_indexes(Book._meta.db_table),
)
def test_primary_key(self):
"""
Tests altering of the primary key
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Tag)
# Ensure the table is there and has the right PK
self.assertTrue(
self.get_indexes(Tag._meta.db_table)['id']['primary_key'],
)
# Alter to change the PK
new_field = SlugField(primary_key=True)
new_field.set_attributes_from_name("slug")
new_field.model = Tag
with connection.schema_editor() as editor:
editor.remove_field(Tag, Tag._meta.get_field_by_name("id")[0])
editor.alter_field(
Tag,
Tag._meta.get_field_by_name("slug")[0],
new_field,
)
# Ensure the PK changed
self.assertNotIn(
'id',
self.get_indexes(Tag._meta.db_table),
)
self.assertTrue(
self.get_indexes(Tag._meta.db_table)['slug']['primary_key'],
)
def test_context_manager_exit(self):
"""
Ensures transaction is correctly closed when an error occurs
inside a SchemaEditor context.
"""
class SomeError(Exception):
pass
try:
with connection.schema_editor():
raise SomeError
except SomeError:
self.assertFalse(connection.in_atomic_block)
@unittest.skipUnless(connection.features.supports_foreign_keys, "No FK support")
def test_foreign_key_index_long_names_regression(self):
"""
Regression test for #21497.
        Only affects databases that support foreign keys.
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(AuthorWithEvenLongerName)
editor.create_model(BookWithLongName)
# Find the properly shortened column name
column_name = connection.ops.quote_name("author_foreign_key_with_really_long_field_name_id")
column_name = column_name[1:-1].lower() # unquote, and, for Oracle, un-upcase
# Ensure the table is there and has an index on the column
self.assertIn(
column_name,
self.get_indexes(BookWithLongName._meta.db_table),
)
@unittest.skipUnless(connection.features.supports_foreign_keys, "No FK support")
def test_add_foreign_key_long_names(self):
"""
Regression test for #23009.
        Only affects databases that support foreign keys.
"""
# Create the initial tables
with connection.schema_editor() as editor:
editor.create_model(AuthorWithEvenLongerName)
editor.create_model(BookWithLongName)
# Add a second FK, this would fail due to long ref name before the fix
new_field = ForeignKey(AuthorWithEvenLongerName, related_name="something")
new_field.set_attributes_from_name("author_other_really_long_named_i_mean_so_long_fk")
with connection.schema_editor() as editor:
editor.add_field(
BookWithLongName,
new_field,
)
def test_creation_deletion_reserved_names(self):
"""
Tries creating a model's table, and then deleting it when it has a
SQL reserved name.
"""
# Create the table
with connection.schema_editor() as editor:
try:
editor.create_model(Thing)
except OperationalError as e:
self.fail("Errors when applying initial migration for a model "
"with a table named after a SQL reserved word: %s" % e)
# Check that it's there
list(Thing.objects.all())
# Clean up that table
with connection.schema_editor() as editor:
editor.delete_model(Thing)
# Check that it's gone
self.assertRaises(
DatabaseError,
lambda: list(Thing.objects.all()),
)
|
{
"content_hash": "11b103edc95482282a1c8149e13f65d5",
"timestamp": "",
"source": "github",
"line_count": 951,
"max_line_length": 125,
"avg_line_length": 39.4794952681388,
"alnum_prop": 0.5704887468371288,
"repo_name": "YYWen0o0/python-frame-django",
"id": "64726aff4ed4df74b0cc092ec75f377fcd14b100",
"size": "37545",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/schema/tests.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "53353"
},
{
"name": "JavaScript",
"bytes": "102434"
},
{
"name": "Python",
"bytes": "9808771"
},
{
"name": "Shell",
"bytes": "10452"
}
],
"symlink_target": ""
}
|
"""
SQLite backend for Django, using the sqlite3 module in the standard library.
"""
import decimal
import math
import re
import warnings
from sqlite3 import dbapi2 as Database
import pytz
from django.core.exceptions import ImproperlyConfigured
from django.db import utils
from django.db.backends import utils as backend_utils
from django.db.backends.base.base import BaseDatabaseWrapper
from django.utils import timezone
from django.utils.dateparse import (
parse_date, parse_datetime, parse_duration, parse_time,
)
from .client import DatabaseClient # isort:skip
from .creation import DatabaseCreation # isort:skip
from .features import DatabaseFeatures # isort:skip
from .introspection import DatabaseIntrospection # isort:skip
from .operations import DatabaseOperations # isort:skip
from .schema import DatabaseSchemaEditor # isort:skip
def decoder(conv_func):
"""
Convert bytestrings from Python's sqlite3 interface to a regular string.
"""
return lambda s: conv_func(s.decode())
Database.register_converter("bool", decoder(lambda s: s == '1'))
Database.register_converter("time", decoder(parse_time))
Database.register_converter("date", decoder(parse_date))
Database.register_converter("datetime", decoder(parse_datetime))
Database.register_converter("timestamp", decoder(parse_datetime))
Database.register_converter("TIMESTAMP", decoder(parse_datetime))
Database.register_converter("decimal", decoder(backend_utils.typecast_decimal))
Database.register_adapter(decimal.Decimal, backend_utils.rev_typecast_decimal)
class DatabaseWrapper(BaseDatabaseWrapper):
vendor = 'sqlite'
display_name = 'SQLite'
# SQLite doesn't actually support most of these types, but it "does the right
# thing" given more verbose field definitions, so leave them as is so that
# schema inspection is more useful.
data_types = {
'AutoField': 'integer',
'BigAutoField': 'integer',
'BinaryField': 'BLOB',
'BooleanField': 'bool',
'CharField': 'varchar(%(max_length)s)',
'DateField': 'date',
'DateTimeField': 'datetime',
'DecimalField': 'decimal',
'DurationField': 'bigint',
'FileField': 'varchar(%(max_length)s)',
'FilePathField': 'varchar(%(max_length)s)',
'FloatField': 'real',
'IntegerField': 'integer',
'BigIntegerField': 'bigint',
'IPAddressField': 'char(15)',
'GenericIPAddressField': 'char(39)',
'NullBooleanField': 'bool',
'OneToOneField': 'integer',
'PositiveIntegerField': 'integer unsigned',
'PositiveSmallIntegerField': 'smallint unsigned',
'SlugField': 'varchar(%(max_length)s)',
'SmallIntegerField': 'smallint',
'TextField': 'text',
'TimeField': 'time',
'UUIDField': 'char(32)',
}
data_types_suffix = {
'AutoField': 'AUTOINCREMENT',
'BigAutoField': 'AUTOINCREMENT',
}
# SQLite requires LIKE statements to include an ESCAPE clause if the value
# being escaped has a percent or underscore in it.
# See http://www.sqlite.org/lang_expr.html for an explanation.
operators = {
'exact': '= %s',
'iexact': "LIKE %s ESCAPE '\\'",
'contains': "LIKE %s ESCAPE '\\'",
'icontains': "LIKE %s ESCAPE '\\'",
'regex': 'REGEXP %s',
'iregex': "REGEXP '(?i)' || %s",
'gt': '> %s',
'gte': '>= %s',
'lt': '< %s',
'lte': '<= %s',
'startswith': "LIKE %s ESCAPE '\\'",
'endswith': "LIKE %s ESCAPE '\\'",
'istartswith': "LIKE %s ESCAPE '\\'",
'iendswith': "LIKE %s ESCAPE '\\'",
}
# The patterns below are used to generate SQL pattern lookup clauses when
# the right-hand side of the lookup isn't a raw string (it might be an expression
# or the result of a bilateral transformation).
# In those cases, special characters for LIKE operators (e.g. \, *, _) should be
# escaped on database side.
#
# Note: we use str.format() here for readability as '%' is used as a wildcard for
# the LIKE operator.
pattern_esc = r"REPLACE(REPLACE(REPLACE({}, '\', '\\'), '%%', '\%%'), '_', '\_')"
pattern_ops = {
'contains': r"LIKE '%%' || {} || '%%' ESCAPE '\'",
'icontains': r"LIKE '%%' || UPPER({}) || '%%' ESCAPE '\'",
'startswith': r"LIKE {} || '%%' ESCAPE '\'",
'istartswith': r"LIKE UPPER({}) || '%%' ESCAPE '\'",
'endswith': r"LIKE '%%' || {} ESCAPE '\'",
'iendswith': r"LIKE '%%' || UPPER({}) ESCAPE '\'",
}
Database = Database
SchemaEditorClass = DatabaseSchemaEditor
# Classes instantiated in __init__().
client_class = DatabaseClient
creation_class = DatabaseCreation
features_class = DatabaseFeatures
introspection_class = DatabaseIntrospection
ops_class = DatabaseOperations
def get_connection_params(self):
settings_dict = self.settings_dict
if not settings_dict['NAME']:
raise ImproperlyConfigured(
"settings.DATABASES is improperly configured. "
"Please supply the NAME value.")
kwargs = {
'database': settings_dict['NAME'],
'detect_types': Database.PARSE_DECLTYPES | Database.PARSE_COLNAMES,
}
kwargs.update(settings_dict['OPTIONS'])
# Always allow the underlying SQLite connection to be shareable
# between multiple threads. The safe-guarding will be handled at a
# higher level by the `BaseDatabaseWrapper.allow_thread_sharing`
# property. This is necessary as the shareability is disabled by
# default in pysqlite and it cannot be changed once a connection is
# opened.
if 'check_same_thread' in kwargs and kwargs['check_same_thread']:
warnings.warn(
'The `check_same_thread` option was provided and set to '
'True. It will be overridden with False. Use the '
'`DatabaseWrapper.allow_thread_sharing` property instead '
'for controlling thread shareability.',
RuntimeWarning
)
kwargs.update({'check_same_thread': False})
if self.features.can_share_in_memory_db:
kwargs.update({'uri': True})
return kwargs
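    # Editor's sketch of the minimal settings dict this method expects;
    # the ENGINE/NAME values below are hypothetical:
    #
    #   DATABASES = {
    #       'default': {
    #           'ENGINE': 'django.db.backends.sqlite3',
    #           'NAME': '/path/to/db.sqlite3',
    #           'OPTIONS': {},
    #       },
    #   }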
def get_new_connection(self, conn_params):
conn = Database.connect(**conn_params)
conn.create_function("django_date_extract", 2, _sqlite_date_extract)
conn.create_function("django_date_trunc", 2, _sqlite_date_trunc)
conn.create_function("django_datetime_cast_date", 2, _sqlite_datetime_cast_date)
conn.create_function("django_datetime_cast_time", 2, _sqlite_datetime_cast_time)
conn.create_function("django_datetime_extract", 3, _sqlite_datetime_extract)
conn.create_function("django_datetime_trunc", 3, _sqlite_datetime_trunc)
conn.create_function("django_time_extract", 2, _sqlite_time_extract)
conn.create_function("django_time_trunc", 2, _sqlite_time_trunc)
conn.create_function("django_time_diff", 2, _sqlite_time_diff)
conn.create_function("django_timestamp_diff", 2, _sqlite_timestamp_diff)
conn.create_function("regexp", 2, _sqlite_regexp)
conn.create_function("django_format_dtdelta", 3, _sqlite_format_dtdelta)
conn.create_function("django_power", 2, _sqlite_power)
conn.execute('PRAGMA foreign_keys = ON')
return conn
def init_connection_state(self):
pass
def create_cursor(self, name=None):
return self.connection.cursor(factory=SQLiteCursorWrapper)
def close(self):
self.validate_thread_sharing()
# If database is in memory, closing the connection destroys the
# database. To prevent accidental data loss, ignore close requests on
# an in-memory db.
if not self.is_in_memory_db():
BaseDatabaseWrapper.close(self)
def _savepoint_allowed(self):
# Two conditions are required here:
# - A sufficiently recent version of SQLite to support savepoints,
# - Being in a transaction, which can only happen inside 'atomic'.
# When 'isolation_level' is not None, sqlite3 commits before each
# savepoint; it's a bug. When it is None, savepoints don't make sense
# because autocommit is enabled. The only exception is inside 'atomic'
# blocks. To work around that bug, on SQLite, 'atomic' starts a
# transaction explicitly rather than simply disable autocommit.
return self.features.uses_savepoints and self.in_atomic_block
def _set_autocommit(self, autocommit):
if autocommit:
level = None
else:
# sqlite3's internal default is ''. It's different from None.
# See Modules/_sqlite/connection.c.
level = ''
# 'isolation_level' is a misleading API.
# SQLite always runs at the SERIALIZABLE isolation level.
with self.wrap_database_errors:
self.connection.isolation_level = level
def disable_constraint_checking(self):
if self.in_atomic_block:
# sqlite3 cannot disable constraint checking inside a transaction.
return False
self.cursor().execute('PRAGMA foreign_keys = OFF')
return True
def enable_constraint_checking(self):
self.cursor().execute('PRAGMA foreign_keys = ON')
def check_constraints(self, table_names=None):
"""
Check each table name in `table_names` for rows with invalid foreign
key references. This method is intended to be used in conjunction with
`disable_constraint_checking()` and `enable_constraint_checking()`, to
determine if rows with invalid references were entered while constraint
checks were off.
Raise an IntegrityError on the first invalid foreign key reference
encountered (if any) and provide detailed information about the
invalid reference in the error message.
Backends can override this method if they can more directly apply
constraint checking (e.g. via "SET CONSTRAINTS ALL IMMEDIATE")
"""
cursor = self.cursor()
if table_names is None:
table_names = self.introspection.table_names(cursor)
for table_name in table_names:
primary_key_column_name = self.introspection.get_primary_key_column(cursor, table_name)
if not primary_key_column_name:
continue
key_columns = self.introspection.get_key_columns(cursor, table_name)
for column_name, referenced_table_name, referenced_column_name in key_columns:
cursor.execute(
"""
SELECT REFERRING.`%s`, REFERRING.`%s` FROM `%s` as REFERRING
LEFT JOIN `%s` as REFERRED
ON (REFERRING.`%s` = REFERRED.`%s`)
WHERE REFERRING.`%s` IS NOT NULL AND REFERRED.`%s` IS NULL
"""
% (
primary_key_column_name, column_name, table_name,
referenced_table_name, column_name, referenced_column_name,
column_name, referenced_column_name,
)
)
for bad_row in cursor.fetchall():
raise utils.IntegrityError(
"The row in table '%s' with primary key '%s' has an "
"invalid foreign key: %s.%s contains a value '%s' that "
"does not have a corresponding value in %s.%s." % (
table_name, bad_row[0], table_name, column_name,
bad_row[1], referenced_table_name, referenced_column_name,
)
)
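    # Editor's hedged sketch of the call pattern the docstring above
    # describes; `connection` stands for any DatabaseWrapper instance and
    # load_fixture_rows() is a hypothetical bulk-load step:
    #
    #   if connection.disable_constraint_checking():
    #       try:
    #           load_fixture_rows()  # insert rows without FK enforcement
    #       finally:
    #           connection.enable_constraint_checking()
    #   connection.check_constraints()  # raises IntegrityError on bad references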
def is_usable(self):
return True
def _start_transaction_under_autocommit(self):
"""
Start a transaction explicitly in autocommit mode.
Staying in autocommit mode works around a bug of sqlite3 that breaks
savepoints when autocommit is disabled.
"""
self.cursor().execute("BEGIN")
def is_in_memory_db(self):
return self.creation.is_in_memory_db(self.settings_dict['NAME'])
FORMAT_QMARK_REGEX = re.compile(r'(?<!%)%s')
class SQLiteCursorWrapper(Database.Cursor):
"""
Django uses "format" style placeholders, but pysqlite2 uses "qmark" style.
This fixes it -- but note that if you want to use a literal "%s" in a query,
you'll need to use "%%s".
"""
def execute(self, query, params=None):
if params is None:
return Database.Cursor.execute(self, query)
query = self.convert_query(query)
return Database.Cursor.execute(self, query, params)
def executemany(self, query, param_list):
query = self.convert_query(query)
return Database.Cursor.executemany(self, query, param_list)
def convert_query(self, query):
return FORMAT_QMARK_REGEX.sub('?', query).replace('%%', '%')
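# Editor's illustration of the conversion above: an unescaped %s becomes
# sqlite's qmark placeholder, while an escaped %%s survives as a literal %s.
#
#   >>> FORMAT_QMARK_REGEX.sub('?', "a = %s AND b LIKE '%%s'").replace('%%', '%')
#   "a = ? AND b LIKE '%s'"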
def _sqlite_date_extract(lookup_type, dt):
if dt is None:
return None
try:
dt = backend_utils.typecast_timestamp(dt)
except (ValueError, TypeError):
return None
if lookup_type == 'week_day':
return (dt.isoweekday() % 7) + 1
elif lookup_type == 'week':
return dt.isocalendar()[1]
elif lookup_type == 'quarter':
return math.ceil(dt.month / 3)
else:
return getattr(dt, lookup_type)
def _sqlite_date_trunc(lookup_type, dt):
try:
dt = backend_utils.typecast_timestamp(dt)
except (ValueError, TypeError):
return None
if lookup_type == 'year':
return "%i-01-01" % dt.year
elif lookup_type == 'quarter':
month_in_quarter = dt.month - (dt.month - 1) % 3
return '%i-%02i-01' % (dt.year, month_in_quarter)
elif lookup_type == 'month':
return "%i-%02i-01" % (dt.year, dt.month)
elif lookup_type == 'day':
return "%i-%02i-%02i" % (dt.year, dt.month, dt.day)
def _sqlite_time_trunc(lookup_type, dt):
try:
dt = backend_utils.typecast_time(dt)
except (ValueError, TypeError):
return None
if lookup_type == 'hour':
return "%02i:00:00" % dt.hour
elif lookup_type == 'minute':
return "%02i:%02i:00" % (dt.hour, dt.minute)
elif lookup_type == 'second':
return "%02i:%02i:%02i" % (dt.hour, dt.minute, dt.second)
def _sqlite_datetime_parse(dt, tzname):
if dt is None:
return None
try:
dt = backend_utils.typecast_timestamp(dt)
except (ValueError, TypeError):
return None
if tzname is not None:
dt = timezone.localtime(dt, pytz.timezone(tzname))
return dt
def _sqlite_datetime_cast_date(dt, tzname):
dt = _sqlite_datetime_parse(dt, tzname)
if dt is None:
return None
return dt.date().isoformat()
def _sqlite_datetime_cast_time(dt, tzname):
dt = _sqlite_datetime_parse(dt, tzname)
if dt is None:
return None
return dt.time().isoformat()
def _sqlite_datetime_extract(lookup_type, dt, tzname):
dt = _sqlite_datetime_parse(dt, tzname)
if dt is None:
return None
if lookup_type == 'week_day':
return (dt.isoweekday() % 7) + 1
elif lookup_type == 'week':
return dt.isocalendar()[1]
elif lookup_type == 'quarter':
return math.ceil(dt.month / 3)
else:
return getattr(dt, lookup_type)
def _sqlite_datetime_trunc(lookup_type, dt, tzname):
dt = _sqlite_datetime_parse(dt, tzname)
if dt is None:
return None
if lookup_type == 'year':
return "%i-01-01 00:00:00" % dt.year
elif lookup_type == 'quarter':
month_in_quarter = dt.month - (dt.month - 1) % 3
return '%i-%02i-01 00:00:00' % (dt.year, month_in_quarter)
elif lookup_type == 'month':
return "%i-%02i-01 00:00:00" % (dt.year, dt.month)
elif lookup_type == 'day':
return "%i-%02i-%02i 00:00:00" % (dt.year, dt.month, dt.day)
elif lookup_type == 'hour':
return "%i-%02i-%02i %02i:00:00" % (dt.year, dt.month, dt.day, dt.hour)
elif lookup_type == 'minute':
return "%i-%02i-%02i %02i:%02i:00" % (dt.year, dt.month, dt.day, dt.hour, dt.minute)
elif lookup_type == 'second':
return "%i-%02i-%02i %02i:%02i:%02i" % (dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second)
def _sqlite_time_extract(lookup_type, dt):
if dt is None:
return None
try:
dt = backend_utils.typecast_time(dt)
except (ValueError, TypeError):
return None
return getattr(dt, lookup_type)
def _sqlite_format_dtdelta(conn, lhs, rhs):
"""
LHS and RHS can be either:
- An integer number of microseconds
- A string representing a timedelta object
- A string representing a datetime
"""
try:
if isinstance(lhs, int):
lhs = str(decimal.Decimal(lhs) / decimal.Decimal(1000000))
real_lhs = parse_duration(lhs)
if real_lhs is None:
real_lhs = backend_utils.typecast_timestamp(lhs)
if isinstance(rhs, int):
rhs = str(decimal.Decimal(rhs) / decimal.Decimal(1000000))
real_rhs = parse_duration(rhs)
if real_rhs is None:
real_rhs = backend_utils.typecast_timestamp(rhs)
if conn.strip() == '+':
out = real_lhs + real_rhs
else:
out = real_lhs - real_rhs
except (ValueError, TypeError):
return None
# typecast_timestamp returns a date or a datetime without timezone.
# It will be formatted as "%Y-%m-%d" or "%Y-%m-%d %H:%M:%S[.%f]"
return str(out)
def _sqlite_time_diff(lhs, rhs):
left = backend_utils.typecast_time(lhs)
right = backend_utils.typecast_time(rhs)
return (
(left.hour * 60 * 60 * 1000000) +
(left.minute * 60 * 1000000) +
(left.second * 1000000) +
(left.microsecond) -
(right.hour * 60 * 60 * 1000000) -
(right.minute * 60 * 1000000) -
(right.second * 1000000) -
(right.microsecond)
)
def _sqlite_timestamp_diff(lhs, rhs):
left = backend_utils.typecast_timestamp(lhs)
right = backend_utils.typecast_timestamp(rhs)
return (left - right).total_seconds() * 1000000
def _sqlite_regexp(re_pattern, re_string):
return bool(re.search(re_pattern, str(re_string))) if re_string is not None else False
def _sqlite_power(x, y):
return x ** y
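# Illustrative sketch (not part of the original file): user-defined functions
# like the ones above are exposed to SQL by registering them on a sqlite3
# connection, e.g.:
#
#     import sqlite3
#     conn = sqlite3.connect(':memory:')
#     conn.create_function('django_power', 2, _sqlite_power)
#     assert conn.execute('SELECT django_power(2, 10)').fetchone()[0] == 1024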
|
{
"content_hash": "33d1fbdd1d9af57b96677913e6ce9ae0",
"timestamp": "",
"source": "github",
"line_count": 485,
"max_line_length": 105,
"avg_line_length": 38.55051546391753,
"alnum_prop": 0.6095630315023801,
"repo_name": "evansd/django",
"id": "e820c6bd2e2ba885868bf1b2ea5d2f5c84b7a796",
"size": "18697",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "django/db/backends/sqlite3/base.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "55975"
},
{
"name": "HTML",
"bytes": "203931"
},
{
"name": "JavaScript",
"bytes": "253392"
},
{
"name": "Makefile",
"bytes": "125"
},
{
"name": "Python",
"bytes": "12009521"
},
{
"name": "Shell",
"bytes": "809"
},
{
"name": "Smarty",
"bytes": "130"
}
],
"symlink_target": ""
}
|
import sys
from taskbuffer.DBProxy import DBProxy
import userinterface.Client as Client
import urllib2,urllib,datetime,time
# password
from config import panda_config
passwd = panda_config.dbpasswd
if len(sys.argv) == 2:
startID = int(sys.argv[1])
endID = startID
else:
startID = int(sys.argv[1])
endID = int(sys.argv[2])
if startID > endID:
    print 'end ID %d is less than start ID %d' % (endID,startID)
sys.exit(1)
# instantiate DB proxies
proxyS = DBProxy()
proxyS.connect(panda_config.dbhost,panda_config.dbpasswd,panda_config.dbuser,panda_config.dbname)
# get PandaIDs from jobsDefined
res = proxyS.querySQL("SELECT dispatchDBlock from jobsDefined4 WHERE PandaID>=%s AND PandaID<=%s GROUP BY dispatchDBlock" % (startID,endID))
# emulate DDM callbacks
for dispatchDBlock, in res:
# get VUID and creationdate
resvuid = proxyS.querySQL("SELECT vuid from Datasets WHERE name='%s'" % dispatchDBlock)
if len(resvuid) == 1:
vuid, = resvuid[0]
# make HTTP request
node={'vuid':vuid}
url=Client.baseURLSSL+'/datasetCompleted'
rdata=urllib.urlencode(node)
req=urllib2.Request(url)
# invoke callback
fd=urllib2.urlopen(req,rdata)
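# Illustrative invocation sketch (assumed environment with the panda config
# importable; not part of the original script):
#
#     python activateJobs.py 1000 2000    # emulate callbacks for PandaIDs 1000-2000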
|
{
"content_hash": "4c308b8622c430036425ae0d3aa22bff",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 140,
"avg_line_length": 30.170731707317074,
"alnum_prop": 0.6871463217461601,
"repo_name": "RRCKI/panda-server",
"id": "b33769d453ac1f1b980c7174a206cf0bd95846ca",
"size": "1237",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "pandaserver/test/activateJobs.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "PLSQL",
"bytes": "23194"
},
{
"name": "Python",
"bytes": "2670522"
},
{
"name": "Shell",
"bytes": "16124"
}
],
"symlink_target": ""
}
|
"""Transport adapter for Requests."""
from __future__ import absolute_import
import cachecontrol
import functools
import logging
import numbers
import time
try:
import requests
except ImportError as error: # pragma: NO COVER
import six
six.raise_from(
ImportError(
"The requests library is not installed, please install the "
"requests package to use the requests transport."
),
error,
)
import requests.adapters # pylint: disable=ungrouped-imports
import requests.exceptions # pylint: disable=ungrouped-imports
from requests.packages.urllib3.util.ssl_ import (
create_urllib3_context,
) # pylint: disable=ungrouped-imports
import six # pylint: disable=ungrouped-imports
from gsi import transport
from gsi.verification import exceptions
_LOGGER = logging.getLogger(__name__)
_DEFAULT_TIMEOUT = 120 # in seconds
class _Response(transport.Response):
"""Requests transport response adapter.
Args:
response (requests.Response): The raw Requests response.
"""
def __init__(self, response):
self._response = response
@property
def status(self):
return self._response.status_code
@property
def headers(self):
return self._response.headers
@property
def data(self):
return self._response.content
class Request(transport.Request):
"""Requests request adapter.
This class is used internally for making requests using various transports
in a consistent way.
Args:
        session (requests.Session): An instance of :class:`requests.Session` used
            to make HTTP requests. If not specified, a session will be created.
.. automethod:: __call__
"""
def __init__(self, session=None):
if not session:
session = requests.Session()
self.session = session
def __call__(
self,
url,
method="GET",
body=None,
headers=None,
timeout=_DEFAULT_TIMEOUT,
**kwargs
):
"""Make an HTTP request using requests.
Args:
url (str): The URI to be requested.
method (str): The HTTP method to use for the request. Defaults
to 'GET'.
body (bytes): The payload / body in HTTP request.
headers (Mapping[str, str]): Request headers.
timeout (Optional[int]): The number of seconds to wait for a
response from the server. If not specified or if None, the
requests default timeout will be used.
kwargs: Additional arguments passed through to the underlying
requests :meth:`~requests.Session.request` method.
Returns:
            gsi.transport.Response: The HTTP response.
        Raises:
            gsi.verification.exceptions.TransportError: If any exception occurred.
"""
try:
_LOGGER.debug("Making request: %s %s", method, url)
response = self.session.request(
method, url, data=body, headers=headers, timeout=timeout, **kwargs
)
return _Response(response)
except requests.exceptions.RequestException as error:
new_error = exceptions.TransportError(error)
six.raise_from(new_error, error)
class CacheRequest(Request):
"""Requests request adapter.
This class is used internally for making requests using various transports
in a consistent way while caching responses when possible.
"""
def __init__(self):
session = requests.session()
cached_session = cachecontrol.CacheControl(session)
super().__init__(session=cached_session)
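# Illustrative usage sketch (hypothetical URL; not part of the original module):
#
#     request = Request()
#     response = request("https://example.com", method="GET", timeout=10)
#     print(response.status, len(response.data))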
|
{
"content_hash": "e4b322ed635a316702cd36399c37532d",
"timestamp": "",
"source": "github",
"line_count": 122,
"max_line_length": 85,
"avg_line_length": 30.59016393442623,
"alnum_prop": 0.632636655948553,
"repo_name": "googleinterns/server-side-identity",
"id": "97b56c2a4a6a8f99bb36c2d98f254d433eaa6781",
"size": "4351",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "gsi/transport/request.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "119586"
}
],
"symlink_target": ""
}
|
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
import unittest
import transaction
from pyramid.authentication import AuthTktAuthenticationPolicy
from pyramid.authorization import ACLAuthorizationPolicy
from pyramid import testing
from .models import DBSession
from unittest.mock import patch
class EuweBlackBoxTests(unittest.TestCase):
    # user max logs in by entering his username and password
    # when he logs in he gets tactics to work on
    # he tries to solve the problem
    # then he gets the next problem
    # he uses the hint button
    # he still cannot solve it, so he decides to try later
def setUp(self):
self.browser = webdriver.Firefox()
def tearDown(self):
self.browser.quit()
def test_browser_title(self):
self.browser.get('http://localhost:6543')
self.assertIn('Euwe', self.browser.title)
def test_login_page(self):
self.browser.get('http://localhost:6543/login')
self.assertIn('Euwe', self.browser.title)
form = self.browser.find_element_by_tag_name('form')
self.assertIn('/login', form.get_attribute('action'))
inputbox = self.browser.find_element_by_id('id_username')
self.assertEqual(inputbox.get_attribute('placeholder'), 'username')
password = self.browser.find_element_by_id('id_password')
self.assertEqual(password.get_attribute('placeholder'), 'password')
submit = self.browser.find_element_by_name('form.submitted')
inputbox.send_keys('max')
password.send_keys('max123')
submit.submit()
def test_fen_page(self):
self.browser.get('http://localhost:6543/fen?id=1')
self.assertIn('Euwe', self.browser.title)
self.assertIn('r1bqkbnr/pppp1ppp/2n5/1B2p3/4P3/5N2/PPPP1PPP/RNBQK2R',
self.browser.page_source)
self.browser.quit()
self.browser = webdriver.Firefox()
self.browser.get('http://localhost:6543/fen?id=100')
self.assertNotIn('r1bqkbnr/pppp1ppp/2n5/1B2p3/4P3/5N2/PPPP1PPP/RNBQK2R',
self.browser.page_source)
def test_edit_page(self):
self.browser.get('http://localhost:6543/edit')
self.assertIn('Euwe', self.browser.title)
btn_start = self.browser.find_element_by_id('id_btn_start')
self.assertEqual(btn_start.get_attribute('value'), 'Start Position')
btn_clear = self.browser.find_element_by_id('id_btn_clear')
self.assertEqual(btn_clear.get_attribute('value'), 'Clear')
btn_save = self.browser.find_element_by_id('id_btn_save')
self.assertEqual(btn_save.get_attribute('value'), 'Save')
text_area = self.browser.find_element_by_id('id_text_area')
        self.assertEqual(text_area.get_attribute('cols'), '100')
        self.assertEqual(text_area.get_attribute('rows'), '20')
self.fail('Finish position edit !')
|
{
"content_hash": "96dfa80ecbd788d9c477f2fa0dddbc0b",
"timestamp": "",
"source": "github",
"line_count": 77,
"max_line_length": 80,
"avg_line_length": 37.58441558441559,
"alnum_prop": 0.6762266758811334,
"repo_name": "xydinesh/euwe",
"id": "eb6d33a1871db544ff35e8ebe25105a605cd7faa",
"size": "2894",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "euwe/functional_tests.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "1926"
},
{
"name": "JavaScript",
"bytes": "44294"
},
{
"name": "Python",
"bytes": "30745"
}
],
"symlink_target": ""
}
|
import numpy as np
from pandas import read_csv
from sqlalchemy import and_
from sqlalchemy.sql.expression import func
from utils import xa, render_date, render_time
from orm import Float, Point
from etl import DATABASE_URL
CHUNK_SIZE=10000
DATA_COLS='ID,DATE,TIME,LAT,LON,PRESS,U,V,TEMP,Q_TIME,Q_POS,Q_PRESS,Q_VEL,Q_TEMP'.split(',')
METADATA_COLS='ID,PRINCIPAL_INVESTIGATOR,ORGANIZATION,EXPERIMENT,1st_DATE,1st_LAT,1st_LON,END_DATE,END_LAT,END_LON,TYPE,FILENAME'.split(',')
DATA_SEPARATOR=r'\s+'
METADATA_SEPARATOR=r'(?:\b|\))(?:\s*\t+\s*|\s\s)(?=[-0-9a-zA-Z])'
DEFAULT_START_DATE='1972-09-28'
DEFAULT_END_DATE='2015-01-01'
def point2csv(p):
return '%ld,%s,%s,%f,%f,%f,%f,%f,%f,%d,%d,%d,%d,%d' % (
p.float_id,
render_date(p.date),
render_time(p.date),
p.lat,
p.lon,
p.pressure,
p.u,
p.v,
p.temperature,
p.q_time,
p.q_pos,
p.q_press,
p.q_vel,
p.q_temp)
def filter_by_params(q,low_pressure=0,high_pressure=9999,start_date=DEFAULT_START_DATE,end_date=DEFAULT_END_DATE,experiment=None):
if experiment is not None:
q = q.filter(Float.experiment==experiment)
q = q.filter(Float.points.any(and_(Point.pressure > low_pressure,
Point.pressure < high_pressure,
Point.date >= start_date,
Point.date <= end_date)))
return q
def filter_by_geom(q,geom):
return q.filter(func.ST_Intersects(Float.track, geom))
def query_geom_data(geom,low_pressure=0,high_pressure=9999,start_date=DEFAULT_START_DATE,end_date=DEFAULT_END_DATE,experiment=None):
"""
Return all floats data in CSV format for any float which
intersects the given WKT geometry and pressure range
"""
yield ','.join(DATA_COLS)
with xa(DATABASE_URL) as session:
q = session.query(Point).join(Float)
q = filter_by_params(q,low_pressure,high_pressure,start_date,end_date,experiment)
q = filter_by_geom(q,geom)
for p in q:
yield point2csv(p)
def query_data(low_pressure=0,high_pressure=9999,start_date=DEFAULT_START_DATE,end_date=DEFAULT_END_DATE,experiment=None):
"""
Return all floats data in CSV format for any float which matches
"""
yield ','.join(DATA_COLS)
with xa(DATABASE_URL) as session:
q = session.query(Point).join(Float)
q = filter_by_params(q,low_pressure,high_pressure,start_date,end_date,experiment)
for p in q:
yield point2csv(p)
def get_track(float_id):
"""
Return float track in WKT
"""
with xa(DATABASE_URL) as session:
for f in session.query(func.ST_AsText(Float.track)).filter(Float.id==float_id):
return f[0]
return 'LINESTRING(0 0,0 0)' # dummy geometry if float is not found
def query_floats(low_pressure=0,high_pressure=9999,start_date=DEFAULT_START_DATE,end_date=DEFAULT_END_DATE,experiment=None):
"""
Return the IDs of all floats that intersect the given bounding box
and pressure range.
"""
with xa(DATABASE_URL) as session:
q = session.query(Float)
q = filter_by_params(q,low_pressure,high_pressure,start_date,end_date,experiment)
float_ids = [f.id for f in q]
return float_ids
def query_geom_floats(geom,low_pressure=0,high_pressure=9999,start_date=DEFAULT_START_DATE,end_date=DEFAULT_END_DATE,experiment=None):
"""
Return the IDs of all floats that intersect the given WKT geometry
and pressure range.
"""
with xa(DATABASE_URL) as session:
q = session.query(Float)
q = filter_by_params(q,low_pressure,high_pressure,start_date,end_date,experiment)
q = filter_by_geom(q,geom)
float_ids = [f.id for f in q]
return float_ids
def all_floats():
with xa(DATABASE_URL) as session:
float_ids = [f.id for f in session.query(Float)]
return float_ids
def get_metadata(float_id):
"""
Return all metadata for the given float, as a dict
"""
with xa(DATABASE_URL) as session:
for f in session.query(Float).filter(Float.id==float_id):
return f.get_metadata()
return {}
def all_experiments():
with xa(DATABASE_URL) as session:
return [_ for _ in session.query(Float.experiment).\
order_by(Float.experiment).\
distinct()]
# debug utilities
def count_floats():
with xa(DATABASE_URL) as session:
return session.query(Float.id).count()
def choose_random_float():
with xa(DATABASE_URL) as session:
for f in session.query(Float).order_by(func.random()).limit(1):
return f.id
return None
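# Illustrative usage sketch (assumes a populated database at DATABASE_URL;
# not part of the original module):
#
#     for line in query_data(low_pressure=500, high_pressure=1500):
#         print(line)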
|
{
"content_hash": "4e9fb0d6b9ac84096fac0f31f1f1ed9e",
"timestamp": "",
"source": "github",
"line_count": 136,
"max_line_length": 140,
"avg_line_length": 34.77205882352941,
"alnum_prop": 0.6348065130048636,
"repo_name": "joefutrelle/noaa_floats",
"id": "a2991ea05c62d58959ade7e344c1cbff2fb36857",
"size": "4729",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "query.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "5954"
},
{
"name": "HTML",
"bytes": "4612"
},
{
"name": "JavaScript",
"bytes": "6272"
},
{
"name": "Python",
"bytes": "19300"
},
{
"name": "Shell",
"bytes": "139"
}
],
"symlink_target": ""
}
|
import sys
import unittest
from libcloud.utils.py3 import httplib
from libcloud.compute.drivers.nephoscale import NephoscaleNodeDriver
from libcloud.test import MockHttp
from libcloud.test.compute import TestCaseMixin
from libcloud.test.file_fixtures import ComputeFileFixtures
class NephoScaleTest(unittest.TestCase, TestCaseMixin):
def setUp(self):
NephoscaleNodeDriver.connectionCls.conn_classes = (
NephoscaleMockHttp, NephoscaleMockHttp)
self.driver = NephoscaleNodeDriver('user', 'password')
def test_list_sizes(self):
sizes = self.driver.list_sizes()
self.assertEqual(len(sizes), 13)
for size in sizes:
self.assertEqual(type(size.disk), int)
self.assertEqual(type(size.ram), int)
def test_list_images(self):
images = self.driver.list_images()
self.assertEqual(len(images), 18)
for image in images:
arch = image.extra.get('architecture')
self.assertTrue(arch.startswith('x86'))
def test_list_locations(self):
locations = self.driver.list_locations()
self.assertEqual(len(locations), 2)
self.assertEqual(locations[0].name, "SJC-1")
def test_list_nodes(self):
nodes = self.driver.list_nodes()
self.assertEqual(len(nodes), 2)
self.assertEqual(nodes[0].extra.get('zone'), 'RIC-1')
self.assertEqual(nodes[0].name, 'mongodb-staging')
self.assertEqual(nodes[0].extra.get('service_type'),
'CS05 - 0.5GB, 1Core, 25GB')
def test_list_keys(self):
keys = self.driver.ex_list_keypairs()
self.assertEqual(len(keys), 2)
self.assertEqual(keys[0].name, 'mistio-ssh')
def test_list_ssh_keys(self):
ssh_keys = self.driver.ex_list_keypairs(ssh=True)
self.assertEqual(len(ssh_keys), 1)
self.assertTrue(ssh_keys[0].public_key.startswith('ssh-rsa'))
def test_list_password_keys(self):
password_keys = self.driver.ex_list_keypairs(password=True)
self.assertEqual(len(password_keys), 1)
self.assertEqual(password_keys[0].password, '23d493j5')
def test_reboot_node(self):
node = self.driver.list_nodes()[0]
result = self.driver.reboot_node(node)
self.assertTrue(result)
def test_destroy_node(self):
node = self.driver.list_nodes()[0]
result = self.driver.destroy_node(node)
self.assertTrue(result)
def test_stop_node(self):
node = self.driver.list_nodes()[0]
result = self.driver.ex_stop_node(node)
self.assertTrue(result)
def test_start_node(self):
node = self.driver.list_nodes()[0]
result = self.driver.ex_start_node(node)
self.assertTrue(result)
def test_rename_node(self):
node = self.driver.list_nodes()[0]
result = self.driver.rename_node(node, 'new-name')
self.assertTrue(result)
def test_create_node(self):
name = 'mongodb-staging'
size = self.driver.list_sizes()[0]
image = self.driver.list_images()[3]
node = self.driver.create_node(name=name,
size=size,
nowait=True,
image=image)
self.assertEqual(node.name, 'mongodb-staging')
def test_create_node_no_name(self):
size = self.driver.list_sizes()[0]
image = self.driver.list_images()[3]
self.assertRaises(TypeError, self.driver.create_node, size=size,
image=image)
def test_delete_ssh_keys(self):
self.assertTrue(self.driver.ex_delete_keypair(key_id=72209, ssh=True))
def test_delete_password_keys(self):
self.assertTrue(self.driver.ex_delete_keypair(key_id=72211))
class NephoscaleMockHttp(MockHttp):
fixtures = ComputeFileFixtures('nephoscale')
def _server_type_cloud(self, method, url, body, headers):
body = self.fixtures.load('list_sizes.json')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _server_cloud(self, method, url, body, headers):
if method == 'POST':
body = self.fixtures.load('success_action.json')
else:
body = self.fixtures.load('list_nodes.json')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _image_server(self, method, url, body, headers):
body = self.fixtures.load('list_images.json')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _datacenter_zone(self, method, url, body, headers):
body = self.fixtures.load('list_locations.json')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _key(self, method, url, body, headers):
body = self.fixtures.load('list_keys.json')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _key_sshrsa(self, method, url, body, headers):
body = self.fixtures.load('list_ssh_keys.json')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _key_password(self, method, url, body, headers):
body = self.fixtures.load('list_password_keys.json')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _server_cloud_88241(self, method, url, body, headers):
body = self.fixtures.load('success_action.json')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _server_cloud_88241_initiator_restart(self, method, url, body,
headers):
body = self.fixtures.load('success_action.json')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _server_cloud_88241_initiator_start(self, method, url, body, headers):
body = self.fixtures.load('success_action.json')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _server_cloud_88241_initiator_stop(self, method, url, body, headers):
body = self.fixtures.load('success_action.json')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _key_password_72211(self, method, url, body, headers):
body = self.fixtures.load('success_action.json')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _key_sshrsa_72209(self, method, url, body, headers):
body = self.fixtures.load('success_action.json')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
if __name__ == '__main__':
sys.exit(unittest.main())
|
{
"content_hash": "fc3243f6f71e6b183518b0d5fb560649",
"timestamp": "",
"source": "github",
"line_count": 171,
"max_line_length": 78,
"avg_line_length": 39.49707602339181,
"alnum_prop": 0.6123778501628665,
"repo_name": "Hybrid-Cloud/badam",
"id": "7c22c6577b32125a0772bd334b58e1937e5e7e27",
"size": "7603",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "patches_tool/aws_patch/aws_deps/libcloud/test/compute/test_nephoscale.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "3817"
},
{
"name": "Python",
"bytes": "29372474"
},
{
"name": "Shell",
"bytes": "17334"
}
],
"symlink_target": ""
}
|
""" ib.ext.cfg.TagValue -> config module for TagValue.java.
"""
from java2python.config.default import modulePrologueHandlers
modulePrologueHandlers += [
'from ib.lib.overloading import overloaded',
]
|
{
"content_hash": "343cf1ab4d60f5e72fc497e994ed2033",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 61,
"avg_line_length": 26.375,
"alnum_prop": 0.7440758293838863,
"repo_name": "chris-ch/IbPy",
"id": "4d49843af2435d8b7a033e1ed22812f736f8a5c8",
"size": "257",
"binary": false,
"copies": "8",
"ref": "refs/heads/master",
"path": "ib/ext/cfg/TagValue.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "3479"
},
{
"name": "Python",
"bytes": "299885"
}
],
"symlink_target": ""
}
|
from django.apps import AppConfig
class ContactsConfig(AppConfig):
name = 'sno_contacts'
|
{
"content_hash": "9f71e149c3cbfac25dcaae30e8a6a0cc",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 33,
"avg_line_length": 19,
"alnum_prop": 0.7578947368421053,
"repo_name": "glad-web-developer/zab_sno",
"id": "be432097e30fd2f6abfadcebab28b73ff2888032",
"size": "95",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/sno_contacts/apps.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "4408"
},
{
"name": "HTML",
"bytes": "11132"
},
{
"name": "JavaScript",
"bytes": "438"
},
{
"name": "Python",
"bytes": "42257"
}
],
"symlink_target": ""
}
|
import proto # type: ignore
from google.ads.googleads.v11.resources.types import ad_group_ad_label
from google.rpc import status_pb2 # type: ignore
__protobuf__ = proto.module(
package="google.ads.googleads.v11.services",
marshal="google.ads.googleads.v11",
manifest={
"MutateAdGroupAdLabelsRequest",
"AdGroupAdLabelOperation",
"MutateAdGroupAdLabelsResponse",
"MutateAdGroupAdLabelResult",
},
)
class MutateAdGroupAdLabelsRequest(proto.Message):
r"""Request message for
[AdGroupAdLabelService.MutateAdGroupAdLabels][google.ads.googleads.v11.services.AdGroupAdLabelService.MutateAdGroupAdLabels].
Attributes:
customer_id (str):
Required. ID of the customer whose ad group
ad labels are being modified.
operations (Sequence[google.ads.googleads.v11.services.types.AdGroupAdLabelOperation]):
Required. The list of operations to perform
on ad group ad labels.
partial_failure (bool):
If true, successful operations will be
carried out and invalid operations will return
errors. If false, all operations will be carried
out in one transaction if and only if they are
all valid. Default is false.
validate_only (bool):
If true, the request is validated but not
executed. Only errors are returned, not results.
"""
customer_id = proto.Field(proto.STRING, number=1,)
operations = proto.RepeatedField(
proto.MESSAGE, number=2, message="AdGroupAdLabelOperation",
)
partial_failure = proto.Field(proto.BOOL, number=3,)
validate_only = proto.Field(proto.BOOL, number=4,)
class AdGroupAdLabelOperation(proto.Message):
r"""A single operation (create, remove) on an ad group ad label.
This message has `oneof`_ fields (mutually exclusive fields).
For each oneof, at most one member field can be set at the same time.
Setting any member of the oneof automatically clears all other
members.
.. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields
Attributes:
create (google.ads.googleads.v11.resources.types.AdGroupAdLabel):
Create operation: No resource name is
expected for the new ad group ad label.
This field is a member of `oneof`_ ``operation``.
remove (str):
Remove operation: A resource name for the ad group ad label
being removed, in this format:
``customers/{customer_id}/adGroupAdLabels/{ad_group_id}~{ad_id}_{label_id}``
This field is a member of `oneof`_ ``operation``.
"""
create = proto.Field(
proto.MESSAGE,
number=1,
oneof="operation",
message=ad_group_ad_label.AdGroupAdLabel,
)
remove = proto.Field(proto.STRING, number=2, oneof="operation",)
class MutateAdGroupAdLabelsResponse(proto.Message):
r"""Response message for an ad group ad labels mutate.
Attributes:
partial_failure_error (google.rpc.status_pb2.Status):
Errors that pertain to operation failures in the partial
failure mode. Returned only when partial_failure = true and
all errors occur inside the operations. If any errors occur
outside the operations (for example, auth errors), we return
an RPC level error.
results (Sequence[google.ads.googleads.v11.services.types.MutateAdGroupAdLabelResult]):
All results for the mutate.
"""
partial_failure_error = proto.Field(
proto.MESSAGE, number=3, message=status_pb2.Status,
)
results = proto.RepeatedField(
proto.MESSAGE, number=2, message="MutateAdGroupAdLabelResult",
)
class MutateAdGroupAdLabelResult(proto.Message):
r"""The result for an ad group ad label mutate.
Attributes:
resource_name (str):
Returned for successful operations.
"""
resource_name = proto.Field(proto.STRING, number=1,)
__all__ = tuple(sorted(__protobuf__.manifest))
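# Illustrative construction sketch (hypothetical resource names; not part of
# the generated module):
#
#     operation = AdGroupAdLabelOperation(
#         remove="customers/123/adGroupAdLabels/456~789_1011",
#     )
#     request = MutateAdGroupAdLabelsRequest(
#         customer_id="123",
#         operations=[operation],
#     )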
|
{
"content_hash": "b4af8dc3f438e71ad860fb72a957b510",
"timestamp": "",
"source": "github",
"line_count": 116,
"max_line_length": 129,
"avg_line_length": 35.64655172413793,
"alnum_prop": 0.667956469165659,
"repo_name": "googleads/google-ads-python",
"id": "f20bd42489409c78acfd0f0d401a0844e7011d19",
"size": "4735",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "google/ads/googleads/v11/services/types/ad_group_ad_label_service.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "23399881"
}
],
"symlink_target": ""
}
|
'''
Collect all of the names within a scope
and attribute them to the containing Block or Module node.
'''
from ..util.dispatch import method_store, multimethod
from .. import node
from .. import scope
class CollectNames(object):
_store = method_store()
def __init__(self):
self.namespace = scope.Root()
@multimethod(_store)
def visit(self, n):
n['namespace'] = self.namespace
@visit.d(node.Block)
def _(self, n):
if 'def_context' not in n and 'spec_context' not in n:
self.namespace = self.namespace.child(n.unique_name)
n['namespace'] = self.namespace
@visit.d(node.Module)
def _(self, n):
self.namespace = self.namespace.child(n.unique_name)
n['namespace'] = self.namespace
@visit.d(node.Val)
def _(self, n):
ident = n.name.value
self.namespace.declare(ident, n)
n['namespace'] = self.namespace
@visit.d(node.Var)
def _(self, n):
ident = n.name.value
self.namespace.declare(ident, n)
n['namespace'] = self.namespace
@visit.d(node.Def)
def _(self, n):
ident = n.name.value
self.namespace.declare(ident, n)
n.body['def_context'] = n
n['namespace'] = self.namespace
self.namespace = self.namespace.child(n.unique_name)
@visit.d(node.Param)
def _(self, n):
ident = n.name.value
self.namespace.declare(ident, n)
n['namespace'] = self.namespace
@visit.d(node.Object)
def _(self, n):
self._visit_spec(n)
@visit.d(node.Trait)
def _(self, n):
self._visit_spec(n)
@visit.d(node.Proto)
def _(self, n):
self._visit_spec(n)
def _visit_spec(self, n):
ident = n.name.value
self.namespace.declare(ident, n)
n.body['spec_context'] = n
n['namespace'] = self.namespace
self.namespace = self.namespace.child(ident)
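# Illustrative sketch (hypothetical tree walker; not part of the original
# module): a pre-order visitor is assumed to call visit() on every node,
# after which each visited node carries a 'namespace' annotation:
#
#     collector = CollectNames()
#     for n in walk(module_node):  # walk() is a hypothetical pre-order walker
#         collector.visit(n)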
|
{
"content_hash": "095e97e39c735fc85c2e2885ee8b4472",
"timestamp": "",
"source": "github",
"line_count": 85,
"max_line_length": 64,
"avg_line_length": 22.95294117647059,
"alnum_prop": 0.583290620194772,
"repo_name": "dacjames/mara-lang",
"id": "60a67bbdbc582c33a33bde0a1927faa61e1150ba",
"size": "1951",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "bootstrap/mara/passes/collect_names.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "9315"
},
{
"name": "Python",
"bytes": "164554"
},
{
"name": "Ragel in Ruby Host",
"bytes": "8387"
}
],
"symlink_target": ""
}
|
import kdb
key1 = kdb.Key("user:/key1", kdb.KEY_VALUE, "some_value")
print("Key1 name=\"{0}\" value=\"{1}\"".format(key1.name, key1.value))
print("")
print("Every Key has properties. Some are read only, some are read+write.")
print("Properties of Key1:")
print(" key1.name = \"{0}\"".format(key1.name))
print(" key1.value = \"{0}\"".format(key1.value))
print(" key1.basename = \"{0}\"".format(key1.basename))
print("")
key1.value = b"some\0value\0"
print("Key1 is now binary: {0}".format(key1.isBinary()))
print("")
key2 = kdb.Key(key1.dup())
# or key2 = copy.copy(key1)
print("Key2 is a copy of Key1. Do they match? {0}".format(key1 == key2))
print("")
key1.name = "system:/key1"
print("We changed name of Key1. New name is \"{0}\"".format(key1.name))
print("Do they still match? {0}".format(key1 == key2))
print("")
key1.basename = "key1_changed"
print("Changing the basename only is possible as well. New name is \"{0}\"".format(key1.name))
print("")
key1.setMeta("foo", "bar")
key1.setMeta("owner", "manuel")
key1.setMeta("comment/#0", "this is my example key")
print("Keys can have metadata. We can iterate over or fetch them by name.")
print("Meta data of Key1 with their values:")
for meta in key1.getMeta():
print(" key1.{0} = \"{1}\"".format(meta.name, meta.value))
print("Remember: Metadata is returned as a Key object.")
print("")
|
{
"content_hash": "0cd9ab47ff1a4825d38215adae896702",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 94,
"avg_line_length": 34.175,
"alnum_prop": 0.6561814191660571,
"repo_name": "ElektraInitiative/libelektra",
"id": "1bef307d7bea8bd27f9076e8faee3d8ec15639c4",
"size": "1367",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "src/bindings/swig/python/examples/example_key.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Awk",
"bytes": "7774"
},
{
"name": "C",
"bytes": "4589018"
},
{
"name": "C++",
"bytes": "1620424"
},
{
"name": "CMake",
"bytes": "453752"
},
{
"name": "CSS",
"bytes": "6818"
},
{
"name": "Dockerfile",
"bytes": "97321"
},
{
"name": "Go",
"bytes": "37235"
},
{
"name": "Groovy",
"bytes": "43620"
},
{
"name": "HTML",
"bytes": "24613"
},
{
"name": "Inform 7",
"bytes": "394"
},
{
"name": "Java",
"bytes": "301170"
},
{
"name": "JavaScript",
"bytes": "272637"
},
{
"name": "Kotlin",
"bytes": "55441"
},
{
"name": "Less",
"bytes": "17593"
},
{
"name": "Lex",
"bytes": "17150"
},
{
"name": "Lua",
"bytes": "17205"
},
{
"name": "Makefile",
"bytes": "8660"
},
{
"name": "Mustache",
"bytes": "50110"
},
{
"name": "Objective-C",
"bytes": "6283"
},
{
"name": "Python",
"bytes": "130633"
},
{
"name": "QML",
"bytes": "87865"
},
{
"name": "QMake",
"bytes": "4256"
},
{
"name": "Roff",
"bytes": "791"
},
{
"name": "Ruby",
"bytes": "84714"
},
{
"name": "Rust",
"bytes": "92230"
},
{
"name": "SWIG",
"bytes": "77718"
},
{
"name": "Shell",
"bytes": "292676"
},
{
"name": "Tcl",
"bytes": "338"
},
{
"name": "Yacc",
"bytes": "4652"
}
],
"symlink_target": ""
}
|
this is another test file
|
{
"content_hash": "12f3040a74bf8127d178cad4e8414cef",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 25,
"avg_line_length": 26,
"alnum_prop": 0.8076923076923077,
"repo_name": "dmull1/pynet_test",
"id": "786a93a631dcd6b2356b525d2b20ac3349b6c5b9",
"size": "26",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "450"
}
],
"symlink_target": ""
}
|
import logging
from pyvisdk.exceptions import InvalidArgumentError
########################################
# Automatically generated, do not edit.
########################################
log = logging.getLogger(__name__)
def AdminPasswordNotChangedEvent(vim, *args, **kwargs):
'''Default password for the Admin user on the host has not been changed.'''
obj = vim.client.factory.create('ns0:AdminPasswordNotChangedEvent')
# do some validation checking...
    if (len(args) + len(kwargs)) < 4:
        raise IndexError('Expected at least 4 arguments got: %d' % (len(args) + len(kwargs)))
required = [ 'chainId', 'createdTime', 'key', 'userName' ]
optional = [ 'changeTag', 'computeResource', 'datacenter', 'ds', 'dvs',
'fullFormattedMessage', 'host', 'net', 'vm', 'dynamicProperty', 'dynamicType' ]
for name, arg in zip(required+optional, args):
setattr(obj, name, arg)
for name, value in kwargs.items():
if name in required + optional:
setattr(obj, name, value)
else:
raise InvalidArgumentError("Invalid argument: %s. Expected one of %s" % (name, ", ".join(required + optional)))
return obj
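# Illustrative call sketch (hypothetical argument values; not part of the
# generated module):
#
#     event = AdminPasswordNotChangedEvent(vim, chainId=1,
#                                          createdTime='2015-03-18T00:00:00Z',
#                                          key=1, userName='root')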
|
{
"content_hash": "6e0dabea25815db34ed07a4cd0970584",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 124,
"avg_line_length": 35.81818181818182,
"alnum_prop": 0.6049069373942471,
"repo_name": "xuru/pyvisdk",
"id": "dbac1f3c77301aa0c81ebf9f3a32ebbe7b62be46",
"size": "1183",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyvisdk/do/admin_password_not_changed_event.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "369"
},
{
"name": "Python",
"bytes": "3037849"
},
{
"name": "Shell",
"bytes": "4517"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('organization', '0006_remove_courseorg_tag'),
('courses', '0007_video_learn_times'),
]
operations = [
migrations.AddField(
model_name='course',
name='teacher',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='organization.Teacher', verbose_name='\u8bb2\u5e08'),
),
]
|
{
"content_hash": "4be2e899db6a52f4f4c00c922579f7fe",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 160,
"avg_line_length": 28.7,
"alnum_prop": 0.6428571428571429,
"repo_name": "tongxindao/Flask-micblog",
"id": "976003c518254ebdddba58fb79dedb5e98918433",
"size": "644",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "MoocOnline/web/djangomooc/apps/courses/migrations/0008_course_teacher.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "11651"
},
{
"name": "Nginx",
"bytes": "564"
},
{
"name": "Python",
"bytes": "68663"
},
{
"name": "Shell",
"bytes": "159"
}
],
"symlink_target": ""
}
|
from perfkitbenchmarker import flags
NONE = 'None'
READ_ONLY = 'ReadOnly'
READ_WRITE = 'ReadWrite'
flags.DEFINE_enum(
'azure_host_caching', NONE,
[NONE, READ_ONLY, READ_WRITE],
'The type of host caching to use on Azure data disks.')
# Azure Storage Account types. See
# http://azure.microsoft.com/en-us/pricing/details/storage/ for more information
# about the different types.
LRS = 'LRS'
PLRS = 'PLRS'
ZRS = 'ZRS'
GRS = 'GRS'
RAGRS = 'RAGRS'
STORAGE = 'Storage'
BLOB_STORAGE = 'BlobStorage'
flags.DEFINE_enum(
'azure_storage_type', LRS,
[LRS, PLRS, ZRS, GRS, RAGRS],
'The type of storage account to create. See '
'http://azure.microsoft.com/en-us/pricing/details/storage/ for more '
'information. To use remote ssd scratch disks, you must use PLRS. If you '
'use PLRS, you must use the DS series of machines, or else VM creation '
'will fail.')
flags.DEFINE_enum(
'azure_blob_account_kind', BLOB_STORAGE,
[STORAGE, BLOB_STORAGE],
'The type of storage account to use for blob storage. Choosing Storage '
'will let you use ZRS storage. Choosing BlobStorage will give you access '
'to Hot and Cold storage tiers.')
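# Illustrative invocation sketch (hypothetical command line; not part of the
# original module):
#
#     ./pkb.py --cloud=Azure --azure_storage_type=PLRS --azure_host_caching=ReadOnly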
|
{
"content_hash": "75da024f97f1ad96aea3b5c5a9bfba48",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 80,
"avg_line_length": 31.864864864864863,
"alnum_prop": 0.6938083121289228,
"repo_name": "meteorfox/PerfKitBenchmarker",
"id": "8825da0bbbe4f222c412ed707846043e0a1c6ed8",
"size": "1790",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "perfkitbenchmarker/providers/azure/flags.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Lua",
"bytes": "1547"
},
{
"name": "Python",
"bytes": "1843285"
},
{
"name": "Shell",
"bytes": "23474"
}
],
"symlink_target": ""
}
|
import scrapy
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import Rule
from corpus_builder.templates.spider import CommonSpider
class MuktoMonaSpider(CommonSpider):
name = 'mukto_mona'
allowed_domains = ['blog.mukto-mona.com']
base_url = 'https://blog.mukto-mona.com/'
start_request_url = base_url
content_body = {
'css': 'div.post-content p::text'
}
rules = (
Rule(LinkExtractor(
restrict_css='div.post-content h2.entry-title'
),
callback='parse_content'),
)
allowed_configurations = [
['start_page'],
['start_page', 'end_page']
]
def request_index(self, response):
for page in range(self.start_page, self.end_page + 1):
yield scrapy.Request(self.base_url + 'page/{page}/'.format(page=page))
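# Illustrative invocation sketch (assumed scrapy project context; not part of
# the original module):
#
#     scrapy crawl mukto_mona -a start_page=1 -a end_page=5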
|
{
"content_hash": "b41de22de22ceb85b7bae88b5324870f",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 82,
"avg_line_length": 26.59375,
"alnum_prop": 0.6251468860164512,
"repo_name": "banglakit/corpus-builder",
"id": "39e4f0b7d0b0c2dc18f54a2cfddb11dd2176169b",
"size": "875",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "corpus_builder/spiders/public_blog/mukto_mona.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "38018"
}
],
"symlink_target": ""
}
|
'''
Created on Mar 18, 2015
pulls down and reads through the TCGA annotation and returns a map
of patient or sample or aliquot id to [[annotationClassificationName, categoryName],...]
of failed annotations only
@author: michael
json return layout
{
"dccAnnotation":[ {
"id":26882,
"dateCreated":"2015-03-06T14:42:56-05:00",
"createdBy":"LeraasK",
"status":"Approved",
"annotationCategory": {
"categoryId":25,
"categoryName":"Item may not meet study protocol",
"annotationClassification": {
"annotationClassificationId":1,
"annotationClassificationName":"Observation"
}
},
"items":[ {
"item":"TCGA-HT-7483",
"uuid":"183dd089-e932-4be2-b252-0e8572e7da4e",
"itemType": {
"itemTypeId":3,
"itemTypeName":"Patient"
},
"disease": {
"diseaseId":21,
"abbreviation":"LGG",
"description":"Brain Lower Grade Glioma"
},
"id":26338
} ],
"notes":[ {
"noteId":26427,
"noteText":"TSS confirmed that submitted tumor is a recurrence and patient had 2 prior resections before tumor submitted to BCR. Submitted tumor was in same tumor bed as primary. The patient had no prior chemo/radiation treatment. ",
"addedBy":"LeraasK",
"dateAdded":"2015-03-06T14:42:56-05:00"
} ],
"approved":true,
"rescinded":false
}
...
]
}
Annotation Classification Id
Observation 1
CenterNotification 2
Notification 3
Redaction 5
Annotation Category Id
*Redaction:Tumor tissue origin incorrect 1
*Redaction:Tumor type incorrect 2
*Redaction:Genotype mismatch 3
*Redaction:Subject withdrew consent 4
*Redaction:Subject identity unknown 5
Notification:Prior malignancy 6
Notification:Neoadjuvant therapy 7
Notification:Qualification metrics changed 8
Notification:Pathology outside specification 9
Notification:Molecular analysis outside specification 10
Notification:Duplicate item 11
Notification:Sample compromised 13
Notification:Clinical data insufficient 14
*Notification:Item does not meet study protocol 15
Notification:Item in special subset 17
Notification:Qualified in error 18
Notification:Item is noncanonical 21
Notification:New notification type 22
Observation:Tumor class but appears normal 23
Observation:Normal class but appears diseased 24
Observation:Item may not meet study protocol 25
Observation:New observation type 26
Redaction:Duplicate case 27
CenterNotification:Center QC failed 28
*CenterNotification:Item flagged DNU 29
Observation:General 30
Permanently missing item or object 36
Notification:WGA Failure 181
Normal tissue origin incorrect 35
Redaction:Administrative Compliance 37
*Notification:History of unacceptable prior treatment related to a prior/other malignancy 201
Notification:History of acceptable prior treatment related to a prior/other malignancy 202
Notification:Case submitted is found to be a recurrence after submission 203
Notification:Synchronous malignancy 204
*indicates do not include
'''
from datetime import datetime
import json
import urllib
def main():
print datetime.now(), 'start parse don\'t use aliquots with reason'
# get the annotations
print '\t', datetime.now(), 'start fetch annotations'
response = urllib.urlopen('https://tcga-data.nci.nih.gov/annotations/resources/searchannotations/json?')
print '\t', datetime.now(), 'finish fetch annotations'
print '\t', datetime.now(), 'start read annotations'
annotations = json.loads(response.read())['dccAnnotation']
print '\t', datetime.now(), 'finish read annotations'
    exclude_annotation_categories = [1,2,3,4,5,15,29,201]
barcode2annotation = {}
count = 0
count_bad = 0
print '\t', datetime.now(), 'start check annotations'
for annotation in annotations:
if 0 == count % 2048:
print '\t\tchecked %s annotations' % (count)
count += 1
annotationCategory = annotation['annotationCategory']
        if annotationCategory['categoryId'] in exclude_annotation_categories:
if not barcode2annotation.has_key(annotation['items'][0]['item']):
count_bad += 1
            barcode_annotations = barcode2annotation.setdefault(annotation['items'][0]['item'], [])
            barcode_annotations.append([annotationCategory['annotationClassification']['annotationClassificationName'], annotationCategory['categoryName']])
print '\t', datetime.now(), 'finish check annotations--total %s, %s don\'t use' % (count, count_bad)
print datetime.now(), 'finished parse don\'t use aliquots with reason'
return barcode2annotation
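# Illustrative usage sketch (not part of the original module):
#
#     barcode2annotation = main()
#     for barcode, notes in barcode2annotation.items():
#         print barcode, notes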
if __name__ == '__main__':
main()
|
{
"content_hash": "e56f312783acc40196f7b3b79f1a9963",
"timestamp": "",
"source": "github",
"line_count": 131,
"max_line_length": 245,
"avg_line_length": 37.82442748091603,
"alnum_prop": 0.6716448032290615,
"repo_name": "cancerregulome/gidget",
"id": "836f3a5efbf8d8ef145d620515bccc0d8a7892d5",
"size": "4955",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "commands/feature_matrix_construction/util/http_AnnotationsManagerTCGA_map_dontuse.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Perl",
"bytes": "83724"
},
{
"name": "Python",
"bytes": "2253496"
},
{
"name": "Shell",
"bytes": "457404"
}
],
"symlink_target": ""
}
|
import io
import os
import mock
from nova import test
from nova import utils
from nova.virt.disk import api as disk_api
from nova.virt.disk.mount import api as mount
from nova.virt import driver
PROC_MOUNTS_CONTENTS = """rootfs / rootfs rw 0 0
sysfs /sys sysfs rw,nosuid,nodev,noexec,relatime 0 0
proc /proc proc rw,nosuid,nodev,noexec,relatime 0 0
udev /dev devtmpfs rw,relatime,size=1013160k,nr_inodes=253290,mode=755 0 0
devpts /dev/pts devpts rw,nosuid,noexec,relatime,gid=5,mode=620 0 0
tmpfs /run tmpfs rw,nosuid,relatime,size=408904k,mode=755 0 0"""
class TestVirtDriver(test.NoDBTestCase):
def test_block_device(self):
swap = {'device_name': '/dev/sdb',
'swap_size': 1}
ephemerals = [{'num': 0,
'virtual_name': 'ephemeral0',
'device_name': '/dev/sdc1',
'size': 1}]
block_device_mapping = [{'mount_device': '/dev/sde',
'device_path': 'fake_device'}]
block_device_info = {
'root_device_name': '/dev/sda',
'swap': swap,
'ephemerals': ephemerals,
'block_device_mapping': block_device_mapping}
empty_block_device_info = {}
self.assertEqual(
driver.block_device_info_get_root(block_device_info), '/dev/sda')
self.assertIsNone(
driver.block_device_info_get_root(empty_block_device_info))
self.assertIsNone(driver.block_device_info_get_root(None))
self.assertEqual(
driver.block_device_info_get_swap(block_device_info), swap)
self.assertIsNone(driver.block_device_info_get_swap(
empty_block_device_info)['device_name'])
self.assertEqual(driver.block_device_info_get_swap(
empty_block_device_info)['swap_size'], 0)
self.assertIsNone(
driver.block_device_info_get_swap({'swap': None})['device_name'])
self.assertEqual(
driver.block_device_info_get_swap({'swap': None})['swap_size'],
0)
self.assertIsNone(
driver.block_device_info_get_swap(None)['device_name'])
self.assertEqual(
driver.block_device_info_get_swap(None)['swap_size'], 0)
self.assertEqual(
driver.block_device_info_get_ephemerals(block_device_info),
ephemerals)
self.assertEqual(
driver.block_device_info_get_ephemerals(empty_block_device_info),
[])
self.assertEqual(
driver.block_device_info_get_ephemerals(None),
[])
def test_swap_is_usable(self):
self.assertFalse(driver.swap_is_usable(None))
self.assertFalse(driver.swap_is_usable({'device_name': None}))
self.assertFalse(driver.swap_is_usable({'device_name': '/dev/sdb',
'swap_size': 0}))
self.assertTrue(driver.swap_is_usable({'device_name': '/dev/sdb',
'swap_size': 1}))
class FakeMount(object):
def __init__(self, image, mount_dir, partition=None, device=None):
self.image = image
self.partition = partition
self.mount_dir = mount_dir
self.linked = self.mapped = self.mounted = False
self.device = device
def do_mount(self):
self.linked = True
self.mapped = True
self.mounted = True
self.device = '/dev/fake'
return True
def do_umount(self):
self.linked = True
self.mounted = False
def do_teardown(self):
self.linked = False
self.mapped = False
self.mounted = False
self.device = None
class TestDiskImage(test.NoDBTestCase):
def setUp(self):
super(TestDiskImage, self).setUp()
def mock_proc_mounts(self, mock_open):
response = io.StringIO(unicode(PROC_MOUNTS_CONTENTS))
mock_open.return_value = response
@mock.patch('__builtin__.open')
def test_mount(self, mock_open):
self.mock_proc_mounts(mock_open)
image = '/tmp/fake-image'
mountdir = '/mnt/fake_rootfs'
fakemount = FakeMount(image, mountdir, None)
def fake_instance_for_format(imgfile, mountdir, partition, imgfmt):
return fakemount
self.stubs.Set(mount.Mount, 'instance_for_format',
staticmethod(fake_instance_for_format))
diskimage = disk_api._DiskImage(image=image, mount_dir=mountdir)
dev = diskimage.mount()
self.assertEqual(diskimage._mounter, fakemount)
self.assertEqual(dev, '/dev/fake')
@mock.patch('__builtin__.open')
def test_umount(self, mock_open):
self.mock_proc_mounts(mock_open)
image = '/tmp/fake-image'
mountdir = '/mnt/fake_rootfs'
fakemount = FakeMount(image, mountdir, None)
def fake_instance_for_format(imgfile, mountdir, partition, imgfmt):
return fakemount
self.stubs.Set(mount.Mount, 'instance_for_format',
staticmethod(fake_instance_for_format))
diskimage = disk_api._DiskImage(image=image, mount_dir=mountdir)
dev = diskimage.mount()
self.assertEqual(diskimage._mounter, fakemount)
self.assertEqual(dev, '/dev/fake')
diskimage.umount()
self.assertEqual(diskimage._mounter, None)
@mock.patch('__builtin__.open')
def test_teardown(self, mock_open):
self.mock_proc_mounts(mock_open)
image = '/tmp/fake-image'
mountdir = '/mnt/fake_rootfs'
fakemount = FakeMount(image, mountdir, None)
def fake_instance_for_format(imgfile, mountdir, partition, imgfmt):
return fakemount
self.stubs.Set(mount.Mount, 'instance_for_format',
staticmethod(fake_instance_for_format))
diskimage = disk_api._DiskImage(image=image, mount_dir=mountdir)
dev = diskimage.mount()
self.assertEqual(diskimage._mounter, fakemount)
self.assertEqual(dev, '/dev/fake')
diskimage.teardown()
self.assertEqual(diskimage._mounter, None)
class TestVirtDisk(test.NoDBTestCase):
def setUp(self):
super(TestVirtDisk, self).setUp()
self.executes = []
def fake_execute(*cmd, **kwargs):
self.executes.append(cmd)
return None, None
self.stubs.Set(utils, 'execute', fake_execute)
def test_lxc_setup_container(self):
image = '/tmp/fake-image'
container_dir = '/mnt/fake_rootfs/'
def proc_mounts(self, mount_point):
return None
def fake_instance_for_format(imgfile, mountdir, partition, imgfmt):
return FakeMount(imgfile, mountdir, partition)
self.stubs.Set(os.path, 'exists', lambda _: True)
self.stubs.Set(disk_api._DiskImage, '_device_for_path', proc_mounts)
self.stubs.Set(mount.Mount, 'instance_for_format',
staticmethod(fake_instance_for_format))
self.assertEqual(disk_api.setup_container(image, container_dir),
'/dev/fake')
def test_lxc_teardown_container(self):
def proc_mounts(self, mount_point):
mount_points = {
'/mnt/loop/nopart': '/dev/loop0',
'/mnt/loop/part': '/dev/mapper/loop0p1',
'/mnt/nbd/nopart': '/dev/nbd15',
'/mnt/nbd/part': '/dev/mapper/nbd15p1',
}
return mount_points[mount_point]
self.stubs.Set(os.path, 'exists', lambda _: True)
self.stubs.Set(disk_api._DiskImage, '_device_for_path', proc_mounts)
expected_commands = []
disk_api.teardown_container('/mnt/loop/nopart')
expected_commands += [
('umount', '/dev/loop0'),
('losetup', '--detach', '/dev/loop0'),
]
disk_api.teardown_container('/mnt/loop/part')
expected_commands += [
('umount', '/dev/mapper/loop0p1'),
('kpartx', '-d', '/dev/loop0'),
('losetup', '--detach', '/dev/loop0'),
]
disk_api.teardown_container('/mnt/nbd/nopart')
expected_commands += [
('blockdev', '--flushbufs', '/dev/nbd15'),
('umount', '/dev/nbd15'),
('qemu-nbd', '-d', '/dev/nbd15'),
]
disk_api.teardown_container('/mnt/nbd/part')
expected_commands += [
('blockdev', '--flushbufs', '/dev/nbd15'),
('umount', '/dev/mapper/nbd15p1'),
('kpartx', '-d', '/dev/nbd15'),
('qemu-nbd', '-d', '/dev/nbd15'),
]
self.assertEqual(self.executes, expected_commands)
def test_lxc_teardown_container_with_namespace_cleaned(self):
def proc_mounts(self, mount_point):
return None
self.stubs.Set(os.path, 'exists', lambda _: True)
self.stubs.Set(disk_api._DiskImage, '_device_for_path', proc_mounts)
expected_commands = []
disk_api.teardown_container('/mnt/loop/nopart', '/dev/loop0')
expected_commands += [
('losetup', '--detach', '/dev/loop0'),
]
disk_api.teardown_container('/mnt/loop/part', '/dev/loop0')
expected_commands += [
('losetup', '--detach', '/dev/loop0'),
]
disk_api.teardown_container('/mnt/nbd/nopart', '/dev/nbd15')
expected_commands += [
('qemu-nbd', '-d', '/dev/nbd15'),
]
disk_api.teardown_container('/mnt/nbd/part', '/dev/nbd15')
expected_commands += [
('qemu-nbd', '-d', '/dev/nbd15'),
]
self.assertEqual(self.executes, expected_commands)
|
{
"content_hash": "5be5730260cb9711053e51cccc9bb203",
"timestamp": "",
"source": "github",
"line_count": 275,
"max_line_length": 77,
"avg_line_length": 36.97090909090909,
"alnum_prop": 0.5512934002163864,
"repo_name": "sacharya/nova",
"id": "4851b6f040cfcdeb4fbd0950fda5e924bd0773f5",
"size": "10842",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nova/tests/virt/test_virt.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "13505239"
},
{
"name": "Shell",
"bytes": "16239"
}
],
"symlink_target": ""
}
|
import psycopg2
from ninfo import PluginBase
from ninfo import util
class ieeedb_plug(PluginBase):
"""This plugin looks up OUI in a SQL database"""
name = 'ieeedb'
title = 'Ieeedb'
description = 'Retrieve information from SQL database'
cache_timeout = 60*60
types = ['mac']
remote = True
local = True
    def setup(self):
        # Store the configured database user so get_info can use it.
        self.idbuser = self.plugin_config['username']
    def get_info(self, arg):
        argtype = util.get_type(arg)
        print "ieeedb plugin type was " + argtype
        if argtype == 'mac':
            conn = psycopg2.connect("dbname=ouilookup user=%s" % (self.idbuser,))
            cur = conn.cursor()
            # The OUI is the first three octets of the MAC address.
            mac_org = (arg.replace(':', '')[:6]).upper()
            # Parameterized query avoids interpolating user input into SQL.
            cur.execute("select manufacturer from ouilookup where oui = %s;", (mac_org,))
            row = cur.fetchone()
            if row is not None:
                prompt = row[0]
            else:
                prompt = "Update the database"
        else:
            prompt = "Invalid input"
        return prompt
|
{
"content_hash": "854ec75c34881bb5a6a42a63875f3769",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 90,
"avg_line_length": 30.17142857142857,
"alnum_prop": 0.5350378787878788,
"repo_name": "kraigu/ninfo-plugin-ieeedb",
"id": "0cc9d153692418fca98cb84661058bf98255aa90",
"size": "1084",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ninfo-plugin-ieeedb/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "1151"
}
],
"symlink_target": ""
}
|
from django.conf.urls import patterns, include, url
urlpatterns = patterns('',
url(r'^$', 'hello.views.home'),
)
|
{
"content_hash": "b1a4833d9b74d56081ab71052344c7bb",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 51,
"avg_line_length": 23.6,
"alnum_prop": 0.6694915254237288,
"repo_name": "cslosiu/TweetList",
"id": "cbe94a8b7e4615ef40bd08e8596626cc37531cec",
"size": "118",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "myproject/urls.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "1313000"
}
],
"symlink_target": ""
}
|
"""
Finite element reference mappings.
"""
import numpy as nm
from sfepy.base.base import get_default, output
from sfepy.discrete.common.mappings import Mapping
from sfepy.discrete.common.extmods.mappings import CMapping
from sfepy.discrete.fem.poly_spaces import PolySpace
class FEMapping(Mapping):
"""
Base class for finite element mappings.
"""
def __init__(self, coors, conn, poly_space=None, gel=None, order=1):
self.coors = coors
self.conn = conn
try:
nm.take(self.coors, self.conn)
except IndexError:
output('coordinates shape: %s' % list(coors.shape))
output('connectivity: min: %d, max: %d' % (conn.min(), conn.max()))
msg = 'incompatible connectivity and coordinates (see above)'
raise IndexError(msg)
self.n_el, self.n_ep = conn.shape
self.dim = self.coors.shape[1]
if poly_space is None:
poly_space = PolySpace.any_from_args(None, gel, order,
base='lagrange',
force_bubble=False)
self.poly_space = poly_space
self.indices = slice(None)
def get_geometry(self):
"""
Return reference element geometry as a GeometryElement instance.
"""
return self.poly_space.geometry
def get_base(self, coors, diff=False):
"""
Get base functions or their gradient evaluated in given
coordinates.
"""
bf = self.poly_space.eval_base(coors, diff=diff)
return bf
def get_physical_qps(self, qp_coors):
"""
Get physical quadrature points corresponding to given reference
element quadrature points.
Returns
-------
qps : array
The physical quadrature points ordered element by element,
i.e. with shape (n_el, n_qp, dim).
"""
bf = self.get_base(qp_coors)
qps = nm.dot(nm.atleast_2d(bf.squeeze()), self.coors[self.conn])
# Reorder so that qps are really element by element.
qps = nm.ascontiguousarray(nm.swapaxes(qps, 0, 1))
return qps
class VolumeMapping(FEMapping):
"""
Mapping from reference domain to physical domain of the same space
dimension.
"""
def get_mapping(self, qp_coors, weights, poly_space=None, ori=None,
transform=None):
"""
Get the mapping for given quadrature points, weights, and
polynomial space.
Returns
-------
cmap : CMapping instance
The volume mapping.
"""
poly_space = get_default(poly_space, self.poly_space)
bf_g = self.get_base(qp_coors, diff=True)
ebf_g = poly_space.eval_base(qp_coors, diff=True, ori=ori,
force_axis=True, transform=transform)
flag = (ori is not None) or (ebf_g.shape[0] > 1)
cmap = CMapping(self.n_el, qp_coors.shape[0], self.dim,
poly_space.n_nod, mode='volume', flag=flag)
cmap.describe(self.coors, self.conn, bf_g, ebf_g, weights)
return cmap
class SurfaceMapping(FEMapping):
"""
Mapping from reference domain to physical domain of the space
dimension higher by one.
"""
def set_basis_indices(self, indices):
"""
Set indices to cell-based basis that give the facet-based basis.
"""
self.indices = indices
def get_base(self, coors, diff=False):
"""
Get base functions or their gradient evaluated in given
coordinates.
"""
bf = self.poly_space.eval_base(coors, diff=diff)
return nm.ascontiguousarray(bf[..., :self.dim-1:, self.indices])
def get_mapping(self, qp_coors, weights, poly_space=None, mode='surface'):
"""
Get the mapping for given quadrature points, weights, and
polynomial space.
Returns
-------
cmap : CMapping instance
The surface mapping.
"""
poly_space = get_default(poly_space, self.poly_space)
bf_g = self.get_base(qp_coors, diff=True)
if nm.allclose(bf_g, 0.0):
raise ValueError('zero base function gradient!')
cmap = CMapping(self.n_el, qp_coors.shape[0], self.dim,
poly_space.n_nod, mode=mode)
cmap.describe(self.coors, self.conn, bf_g, None, weights)
return cmap
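# Illustrative sketch (hypothetical single-triangle mesh; not part of the
# original module):
#
#     import numpy as nm
#     from sfepy.discrete.fem.geometry_element import GeometryElement
#     coors = nm.array([[0.0, 0.0], [1.0, 0.0], [0.0, 1.0]])
#     conn = nm.array([[0, 1, 2]])
#     mapping = VolumeMapping(coors, conn, gel=GeometryElement('2_3'), order=1)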
|
{
"content_hash": "c0f4f0724ef999917b40b07c5a6752bd",
"timestamp": "",
"source": "github",
"line_count": 144,
"max_line_length": 79,
"avg_line_length": 31.28472222222222,
"alnum_prop": 0.578912319644839,
"repo_name": "lokik/sfepy",
"id": "ca385bafbcdee392dbb29475f718732d8e9163e9",
"size": "4505",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sfepy/discrete/fem/mappings.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "448969"
},
{
"name": "C++",
"bytes": "37842"
},
{
"name": "GLSL",
"bytes": "6058"
},
{
"name": "Makefile",
"bytes": "184"
},
{
"name": "PowerShell",
"bytes": "3118"
},
{
"name": "Python",
"bytes": "2701733"
},
{
"name": "Shell",
"bytes": "71"
}
],
"symlink_target": ""
}
|
import os
import threading
import txaio
txaio.use_twisted()
from txaio import make_logger
from twisted.internet.defer import inlineCallbacks
from autobahn.twisted.util import sleep
from autobahn.wamp.types import PublishOptions
from autobahn.twisted.wamp import ApplicationSession
class MyPublisher(ApplicationSession):
log = make_logger()
def __init__(self, config):
self.ident = '{}:{}'.format(os.getpid(), threading.get_ident())
self.log.info('{klass}[{ident}].__init__(config={config})',
klass=self.__class__.__name__, ident=self.ident, config=str(config))
ApplicationSession.__init__(self, config)
@inlineCallbacks
def onJoin(self, details):
self.log.info('{klass}[{ident}].onJoin(details={details})',
klass=self.__class__.__name__, ident=self.ident, details=details)
n = 2
running = True
last_error = None
while running and n <= 2**25:
data = os.urandom(n)
try:
res = yield self.publish('com.example.topic1', data,
options=PublishOptions(acknowledge=True, exclude_me=False))
except Exception as e:
self.log.failure()
running = False
last_error = e
else:
self.log.info('{klass}[{ident}].publish(): succeeded for n={n}, res={res}',
klass=self.__class__.__name__, ident=self.ident, n=n, res=res)
n = n * 2
yield sleep(1)
if last_error:
self.log.info('Encountered error at n={n}', n=n)
else:
self.log.info('Finished (without error) at n={n}', n=n)
yield sleep(1)
yield self.publish('com.example.topic1', os.urandom(16), options=PublishOptions(acknowledge=True))
self.log.info('Ok, session still working - leaving now ..')
yield self.leave()
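# Hedged usage sketch (not in the original file): running this component
# standalone against a local Crossbar router; the URL and realm below are
# assumptions, not values taken from the example's configuration.
if __name__ == '__main__':
    from autobahn.twisted.wamp import ApplicationRunner
    runner = ApplicationRunner(u'ws://localhost:8080/ws', u'realm1')
    runner.run(MyPublisher)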
|
{
"content_hash": "6dcc94e4be2b6caf91717eb206677695",
"timestamp": "",
"source": "github",
"line_count": 61,
"max_line_length": 106,
"avg_line_length": 32.557377049180324,
"alnum_prop": 0.5664652567975831,
"repo_name": "meejah/crossbarexamples",
"id": "de73ad15791d9d9f3c598bc14d6473d6e47bc16d",
"size": "1986",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "containers/max_message_size/publisher.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "5120"
},
{
"name": "C#",
"bytes": "22931"
},
{
"name": "C++",
"bytes": "77209"
},
{
"name": "CSS",
"bytes": "270073"
},
{
"name": "Dockerfile",
"bytes": "1423"
},
{
"name": "Erlang",
"bytes": "16493"
},
{
"name": "HTML",
"bytes": "4683415"
},
{
"name": "Hack",
"bytes": "1691"
},
{
"name": "Java",
"bytes": "20795"
},
{
"name": "JavaScript",
"bytes": "3742210"
},
{
"name": "Jupyter Notebook",
"bytes": "335655"
},
{
"name": "Lua",
"bytes": "1233"
},
{
"name": "Makefile",
"bytes": "47961"
},
{
"name": "PHP",
"bytes": "47991"
},
{
"name": "PLSQL",
"bytes": "363209"
},
{
"name": "PLpgSQL",
"bytes": "7749"
},
{
"name": "Python",
"bytes": "726879"
},
{
"name": "SQLPL",
"bytes": "909"
},
{
"name": "Shell",
"bytes": "19566"
},
{
"name": "TSQL",
"bytes": "2937"
}
],
"symlink_target": ""
}
|
"""
Code for extracting relational triples from the ieer and conll2002 corpora.
Relations are stored internally as dictionaries ('reldicts').
The two serialization outputs are I{rtuple} and I{clause}.
- An I{rtuple} is a tuple of the form C{(subj, filler, obj)},
where C{subj} and C{obj} are pairs of Named Entity mentions, and C{filler} is the string of words
    occurring between C{subj} and C{obj} (with no intervening NEs). Strings are printed via ``repr()`` to
circumvent locale variations in rendering utf-8 encoded strings.
- A I{clause} is an atom of the form C{relsym(subjsym, objsym)},
where the relation, subject and object have been canonicalized to single strings.
"""
# todo: get a more general solution to canonicalized symbols for clauses -- maybe use xmlcharrefs?
from nltk import defaultdict
from string import join
import re
import htmlentitydefs
from itertools import ifilter
# Dictionary that associates corpora with NE classes
NE_CLASSES = {
'ieer': ['LOCATION', 'ORGANIZATION', 'PERSON', 'DURATION',
'DATE', 'CARDINAL', 'PERCENT', 'MONEY', 'MEASURE'],
'conll2002': ['LOC', 'PER', 'ORG']
}
# Allow abbreviated class labels
short2long = dict(LOC = 'LOCATION', ORG = 'ORGANIZATION', PER = 'PERSON')
long2short = dict(LOCATION ='LOC', ORGANIZATION = 'ORG', PERSON = 'PER')
def _expand(type):
"""
Expand an NE class name.
@type type: C{str}
@rtype: C{str}
"""
try:
return short2long[type]
except KeyError:
return type
def class_abbrev(type):
"""
Abbreviate an NE class name.
@type type: C{str}
@rtype: C{str}
"""
try:
return long2short[type]
except KeyError:
return type
def _join(lst, sep=' ', untag=False):
"""
    Join a list into a string, turning tag tuples into tag strings or just words.
@param untag: if C{True}, omit the tag from tagged input strings.
@type lst: C{list}
@rtype: C{str}
"""
try:
return join(lst, sep=sep)
except TypeError:
if untag:
return join([tup[0] for tup in lst], sep=sep)
from nltk.tag import tuple2str
return join([tuple2str(tup) for tup in lst], sep=sep)
def descape_entity(m, defs=htmlentitydefs.entitydefs):
"""
Translate one entity to its ISO Latin value.
Inspired by example from effbot.org
"""
try:
return defs[m.group(1)]
except KeyError:
return m.group(0) # use as is
def list2sym(lst):
"""
Convert a list of strings into a canonical symbol.
@type lst: C{list}
@return: a Unicode string without whitespace
@rtype: C{unicode}
"""
sym = _join(lst, '_', untag=True)
sym = sym.lower()
ENT = re.compile("&(\w+?);")
sym = ENT.sub(descape_entity, sym)
sym = sym.replace('.', '')
return sym
def mk_pairs(tree):
"""
Group a chunk structure into a list of pairs of the form (list(str), L{Tree})
In order to facilitate the construction of (L{Tree}, string, L{Tree}) triples, this
identifies pairs whose first member is a list (possibly empty) of terminal
strings, and whose second member is a L{Tree} of the form (NE_label, terminals).
@param tree: a chunk tree
@return: a list of pairs (list(C{str}), L{Tree})
@rtype: C{list} of C{tuple}
"""
from nltk import Tree
pairs = []
pair = [[], None]
for dtr in tree:
if not isinstance(dtr, Tree):
pair[0].append(dtr)
else:
# dtr is a Tree
pair[1] = dtr
pairs.append(pair)
pair = [[], None]
return pairs
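# Hedged illustration (not in the original module): for a chunk tree like
#   Tree('S', ['About', Tree('ORG', ['BBDO', 'South']), 'in',
#              Tree('LOC', ['Atlanta'])])
# mk_pairs returns
#   [[['About'], Tree('ORG', ['BBDO', 'South'])],
#    [['in'], Tree('LOC', ['Atlanta'])]]
# i.e. each NE subtree is paired with the terminal strings that precede it;
# terminals after the last NE are dropped.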
def mk_reldicts(pairs, window=5, trace=0):
"""
Converts the pairs generated by L{mk_pairs} into a 'reldict': a dictionary which
stores information about the subject and object NEs plus the filler between them.
Additionally, a left and right context of length =< window are captured (within
a given input sentence).
    @param pairs: a pair of list(str) and L{Tree}, as generated by L{mk_pairs}
@param window: a threshold for the number of items to include in the left and right context
@type window: C{int}
    @return: 'relation' dictionaries whose keys are 'lcon', 'subjclass', 'subjtext', 'subjsym', 'filler', 'objclass', 'objtext', 'objsym' and 'rcon'
@rtype: C{list} of C{defaultdict}
"""
result = []
while len(pairs) > 2:
reldict = defaultdict(str)
reldict['lcon'] = _join(pairs[0][0][-window:])
reldict['subjclass'] = pairs[0][1].node
reldict['subjtext'] = _join(pairs[0][1].leaves())
reldict['subjsym'] = list2sym(pairs[0][1].leaves())
reldict['filler'] = _join(pairs[1][0])
reldict['objclass'] = pairs[1][1].node
reldict['objtext'] = _join(pairs[1][1].leaves())
reldict['objsym'] = list2sym(pairs[1][1].leaves())
reldict['rcon'] = _join(pairs[2][0][:window])
if trace:
print "(rel(%s, %s)" % (reldict['subjclass'], reldict['objclass'])
result.append(reldict)
pairs = pairs[1:]
return result
def relextract(subjclass, objclass, doc, corpus='ieer', pattern=None, window=10):
"""
Filter the output of L{mk_reldicts} according to specified NE classes and a filler pattern.
The parameters C{subjclass} and C{objclass} can be used to restrict the
Named Entities to particular types (any of 'LOCATION', 'ORGANIZATION',
'PERSON', 'DURATION', 'DATE', 'CARDINAL', 'PERCENT', 'MONEY', 'MEASURE').
@param subjclass: the class of the subject Named Entity.
@type subjclass: C{string}
@param objclass: the class of the object Named Entity.
@type objclass: C{string}
@param doc: input document
@type doc: L{ieer} document or a list of chunk trees
@param corpus: name of the corpus to take as input; possible values are
'ieer' and 'conll2002'
@type corpus: C{string}
@param pattern: a regular expression for filtering the fillers of
retrieved triples.
@type pattern: C{SRE_Pattern}
@param window: filters out fillers which exceed this threshold
@type window: C{int}
@return: see L{mk_reldicts}
@rtype: C{list} of C{defaultdict}
"""
if subjclass and subjclass not in NE_CLASSES[corpus]:
if _expand(subjclass) in NE_CLASSES[corpus]:
subjclass = _expand(subjclass)
else:
raise ValueError, "your value for the subject type has not been recognized: %s" % subjclass
if objclass and objclass not in NE_CLASSES[corpus]:
if _expand(objclass) in NE_CLASSES[corpus]:
objclass = _expand(objclass)
else:
raise ValueError, "your value for the object type has not been recognized: %s" % objclass
if corpus == 'ieer':
pairs = mk_pairs(doc.text) + mk_pairs(doc.headline)
elif corpus == 'conll2002':
pairs = mk_pairs(doc)
else:
raise ValueError, "corpus type not recognized"
reldicts = mk_reldicts(pairs)
relfilter = lambda x: (x['subjclass'] == subjclass and
len(x['filler'].split()) <= window and
pattern.match(x['filler']) and
x['objclass'] == objclass)
return filter(relfilter, reldicts)
def show_raw_rtuple(reldict, lcon=False, rcon=False):
"""
Pretty print the reldict as an rtuple.
@param reldict: a relation dictionary
@type reldict: C{defaultdict}
"""
items = [class_abbrev(reldict['subjclass']), reldict['subjtext'], reldict['filler'], class_abbrev(reldict['objclass']), reldict['objtext']]
format = '[%s: %r] %r [%s: %r]'
if lcon:
items = [reldict['lcon']] + items
format = '...%r)' + format
if rcon:
items.append(reldict['rcon'])
format = format + '(%r...'
printargs = tuple(items)
return format % printargs
def show_clause(reldict, relsym):
"""
Print the relation in clausal form.
@param reldict: a relation dictionary
@type reldict: C{defaultdict}
@param relsym: a label for the relation
@type relsym: C{str}
"""
items = (relsym, reldict['subjsym'], reldict['objsym'])
return "%s(%r, %r)" % items
#######################################################
# Demos of relation extraction with regular expressions
#######################################################
############################################
# Example of in(ORG, LOC)
############################################
def in_demo(trace=0):
from nltk.corpus import ieer
IN = re.compile(r'.*\bin\b(?!\b.+ing\b)')
print
print "in(ORG, LOC) -- just the clauses:"
print "=" * 45
for file in ieer.files():
for doc in ieer.parsed_docs(file):
if trace:
print doc.docno
print "=" * 15
for rel in relextract('ORG', 'LOC', doc, pattern=IN):
print show_clause(rel, relsym='IN')
############################################
# Example of has_role(PER, LOC)
############################################
def roles_demo(trace=0):
from nltk.corpus import ieer
roles = """
(.*( # assorted roles
analyst|
chair(wo)?man|
commissioner|
counsel|
director|
economist|
editor|
executive|
foreman|
governor|
head|
lawyer|
leader|
librarian).*)|
manager|
partner|
president|
producer|
professor|
researcher|
spokes(wo)?man|
writer|
,\sof\sthe?\s* # "X, of (the) Y"
"""
ROLES = re.compile(roles, re.VERBOSE)
print
print "has_role(PER, ORG) -- raw rtuples:"
print "=" * 45
for file in ieer.files():
for doc in ieer.parsed_docs(file):
lcon = rcon = False
if trace:
print doc.docno
print "=" * 15
lcon = rcon = True
for rel in relextract('PER', 'ORG', doc, pattern=ROLES):
print show_raw_rtuple(rel, lcon=lcon, rcon=rcon)
##############################################
### Show what's in the IEER Headlines
##############################################
#print "NER in Headlines"
#print "=" * 45
#for file in ieer.files():
#for doc in ieer.parsed_docs(file):
#tree = doc.headline
#if isinstance(tree, Tree):
#print doc.docno, tree.pprint()
#
#print join(tree.leaves())
#else:
#print tree
#############################################
## Dutch CONLL2002: take_on_role(PER, ORG)
#############################################
def conllned(trace=1):
"""
Find the copula+'van' relation ('of') in the Dutch tagged training corpus
from CoNLL 2002.
"""
from nltk.corpus import conll2002
vnv = """
(
is/V|
was/V|
werd/V|
wordt/V
)
.*
van/Prep
"""
VAN = re.compile(vnv, re.VERBOSE)
print
print "van(PER, ORG) -- raw rtuples with context:"
print "=" * 45
for doc in conll2002.chunked_sents('ned.train'):
lcon = rcon = False
if trace:
lcon = rcon = True
for rel in relextract('PER', 'ORG', doc, corpus='conll2002', pattern=VAN):
print show_raw_rtuple(rel, lcon=lcon, rcon=rcon)
#############################################
## Spanish CONLL2002: (PER, ORG)
#############################################
def conllesp():
from nltk.corpus import conll2002
de = """
.*
(
de/SP|
del/SP
)
"""
DE = re.compile(de, re.VERBOSE)
print
print "de(ORG, LOC) -- just the first 10 clauses:"
print "=" * 45
rels = [rel for doc in conll2002.chunked_sents('esp.train')
for rel in relextract('ORG', 'LOC', doc, corpus='conll2002', pattern = DE)]
for r in rels[:10]: print show_clause(r, relsym='DE')
print
#s = 'mcglashan_&_sarrail'
#l = ['mcglashan', '&', 'sarrail']
#pattern = re.compile("&(\w+?);")
#new = list2sym(l)
#s = pattern.sub(descape_entity, s)
#print s, new
if __name__ == '__main__':
in_demo(trace=0)
#roles_demo(trace=0)
#conllned()
#conllesp()
|
{
"content_hash": "593580ee0f5568631a5e07bce159fed3",
"timestamp": "",
"source": "github",
"line_count": 413,
"max_line_length": 146,
"avg_line_length": 30.239709443099272,
"alnum_prop": 0.5584914724957963,
"repo_name": "hectormartinez/rougexstem",
"id": "10677db9d72291fe39282fd4c2c6ac73e747a6b4",
"size": "12702",
"binary": false,
"copies": "9",
"ref": "refs/heads/master",
"path": "taln2016/icsisumm-primary-sys34_v1/nltk/nltk-0.9.2/nltk/sem/relextract.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "AMPL",
"bytes": "252646"
},
{
"name": "Batchfile",
"bytes": "2712"
},
{
"name": "C",
"bytes": "3446743"
},
{
"name": "C#",
"bytes": "3511"
},
{
"name": "CSS",
"bytes": "1240"
},
{
"name": "HTML",
"bytes": "315849"
},
{
"name": "M4",
"bytes": "4099"
},
{
"name": "Makefile",
"bytes": "199393"
},
{
"name": "Perl",
"bytes": "378641"
},
{
"name": "Perl6",
"bytes": "67212"
},
{
"name": "Python",
"bytes": "3712683"
},
{
"name": "Shell",
"bytes": "319340"
},
{
"name": "TeX",
"bytes": "536677"
},
{
"name": "XQuery",
"bytes": "5987"
},
{
"name": "XS",
"bytes": "45555"
}
],
"symlink_target": ""
}
|
import tempfile
from snakemake.shell import shell
from lcdblib.snakemake import helpers
extra = snakemake.params.get('extra', '')
log = snakemake.log_fmt_shell()
stranded = snakemake.params.get('stranded', False)
try:
stranded_int = {False: 0, True: 1, 'reverse': 2}[stranded]
except KeyError:
raise ValueError('"stranded" must be True|False|"reverse"')
paired = snakemake.params.get('paired', False)
try:
    paired_bool = {True: 'TRUE', False: 'FALSE'}[paired]
except KeyError:
raise ValueError('"paired" must be True or False')
# To avoid issues with png() related to X11 and cairo, we can use bitmap() instead.
# (thanks
# http://stackoverflow.com/questions/24999983/
# r-unable-to-start-device-png-capabilities-has-true-for-png
# #comment52353278_25064603 )
script = """
library(dupRadar)
bam <- "{snakemake.input.bam}"
gtf <- "{snakemake.input.annotation}"
dm <- analyzeDuprates(bam, gtf, {stranded_int}, {paired_bool}, {snakemake.threads})
dm$mhRate <- (dm$allCountsMulti - dm$allCounts) / dm$allCountsMulti
bitmap(file="{snakemake.output.multimapping_histogram}")
hist(dm$mhRate, breaks=50, main=basename(bam),
xlab="Multimapping rate per gene", ylab="Frequency")
dev.off()
bitmap(file="{snakemake.output.density_scatter}")
duprateExpDensPlot(dm, main=basename(bam))
dev.off()
bitmap(file="{snakemake.output.expression_histogram}")
expressionHist(dm)
dev.off()
bitmap(file="{snakemake.output.expression_boxplot}")
par(mar=c(10,4,4,2)+.1)
duprateExpBoxplot(dm, main=basename(bam))
dev.off()
bitmap(file="{snakemake.output.expression_barplot}")
readcountExpBoxplot(dm)
dev.off()
write.table(dm, file="{snakemake.output.dataframe}", sep="\\t")
""".format(**locals())
tmp = tempfile.NamedTemporaryFile(delete=False).name
helpers.rscript(script, tmp, log=log)
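# Hedged sketch (not part of the wrapper itself) of a Snakefile rule that
# could drive it; all paths and the wrapper URI are assumptions:
#
#   rule dupradar:
#       input:
#           bam='sample.sorted.dedup.bam',
#           annotation='annotation.gtf'
#       output:
#           multimapping_histogram='dupradar/multimapping_histogram.png',
#           density_scatter='dupradar/density_scatter.png',
#           expression_histogram='dupradar/expression_histogram.png',
#           expression_boxplot='dupradar/expression_boxplot.png',
#           expression_barplot='dupradar/expression_barplot.png',
#           dataframe='dupradar/dupradar.tsv'
#       params:
#           stranded=False, paired=True
#       threads: 4
#       log: 'logs/dupradar.log'
#       wrapper: 'file://path/to/wrappers/dupradar'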
|
{
"content_hash": "7785493d49d924819576b7c41b613d54",
"timestamp": "",
"source": "github",
"line_count": 59,
"max_line_length": 83,
"avg_line_length": 30.338983050847457,
"alnum_prop": 0.7318435754189944,
"repo_name": "lcdb/lcdb-wrapper-tests",
"id": "dc090bda32126f26dbdf4c849f335575c1482f25",
"size": "1790",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "wrappers/dupradar/wrapper.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "75551"
},
{
"name": "Shell",
"bytes": "596"
}
],
"symlink_target": ""
}
|
import urllib2, json
UCONN_ROUTE_INFO_URL = 'http://www.uconnshuttle.com/Services/JSONPRelay.svc/GetRoutes'
ROUTE_STOPS_URL = 'http://www.uconnshuttle.com/Services/JSONPRelay.svc/GetMapStopEstimates'
VEHICLE_INFO_URL = 'http://www.uconnshuttle.com/Services/JSONPRelay.svc/GetMapVehiclePoints'
STOP_INFO_URL = 'http://www.uconnshuttle.com/Services/JSONPRelay.svc/GetStops'
def fetch_routes():
uconn_routes = urllib2.urlopen(UCONN_ROUTE_INFO_URL)
if (uconn_routes.getcode() == 200):
json_obj = json.load(uconn_routes)
for route_info in json_obj:
description = route_info["Description"]
route_id = route_info["RouteID"]
print description + " = %d" % route_id
def fetch_route_stops():
route_stops = urllib2.urlopen(ROUTE_STOPS_URL)
if (route_stops.getcode() == 200):
json_obj = json.load(route_stops)
for route_stop in json_obj:
route_stop_id = route_stop["RouteStopID"]
stop_order = route_stop["StopOrder"]
description = route_stop["description"]
for estimate in route_stop["estimates"]:
on_route = estimate["OnRoute"]
seconds_to_stop = estimate["SecondsToStop"]
vehicle_id = estimate["VehicleID"]
ending = "th"
if (stop_order == 1):
ending = "st"
elif (stop_order == 2):
ending = "nd"
elif (stop_order == 3):
ending = "rd"
print description + (" = %d" % route_stop_id) + (" is %d" % stop_order) + ending
def fetch_vehicle_info():
vehicle_info = urllib2.urlopen(VEHICLE_INFO_URL)
if (vehicle_info.getcode() == 200):
json_obj = json.load(vehicle_info)
for vehicle in json_obj:
ground_speed = vehicle["GroundSpeed"]
heading = vehicle["Heading"]
is_delayed = vehicle["IsDelayed"]
is_on_route = vehicle["IsOnRoute"]
latitude = vehicle["latitude"]
longitude = vehicle["Longitude"]
name = vehicle["Name"]
route_id = vehicle["RouteID"]
seconds = vehicle["Seconds"]
timestamp = vehicle["Timestamp"]
vehicle_id = vehicle["VehicleID"]
print ("Vehicle %d" + vehicle_id) + (" is going at a speed of %f" + ground_speed)
def fetch_stop_info():
stop_info = urllib2.urlopen(STOP_INFO_URL)
if (stop_info.getcode() == 200):
json_obj = json.load(stop_info)
for stop in json_obj:
description = stop["Description"]
latitude = stop["Latitude"]
longitude = stop["Longitude"]
for map_point in stop["MapPoints"]:
heading = map_point["Heading"]
latitude = map_point["Latitude"]
longitude = map_point["Longitude"]
print description + " is at " + ("%f, %f" % (latitude, longitude))
import math
def km_distance(lat1, lon1, lat2, lon2):
def to_radians(degrees):
return degrees * math.pi / 180.0
R = 6371
phi1 = to_radians(lat1)
phi2 = to_radians(lat2)
delta_phi = to_radians(lat2 - lat1)
delta_lambda = to_radians(lon2 - lon1)
a = math.pow(math.sin(delta_phi / 2.0), 2) + math.cos(phi1) * math.cos(phi2) * math.pow(math.sin(delta_lambda / 2.0), 2)
c = 2 * math.atan2(math.sqrt(a), math.sqrt(1 - a))
d = R * c
return d
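# Hedged sanity check (not in the original script): two made-up points near
# Storrs and Hartford, CT should come out roughly 35 km apart.
def _km_distance_demo():
    print "%.1f km" % km_distance(41.8084, -72.2495, 41.7658, -72.6734)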
# shape_id,shape_pt_lat,shape_pt_lon,shape_pt_sequence,shape_dist_traveled
class ShapeItem:
shape_id = None # will correspond to route_id
lat = None
lon = None
order = None
dist_traveled = None
# the original route IDs from the UConn JSON feed were not preserved
# so this method translates the original IDs to the IDs used in UC_GTFS
# the Late Night route is not supported, apparently
def translated_id(self, original):
if original == 3:
return 4
elif original == 5:
return 1
elif original == 11:
return 8
elif original == 19:
return 2
elif original == 21:
return 3
elif original == 22:
return 5
elif original == 24:
return 7
elif original == 25:
return 6
else:
return 'route ID not supported'
def __init__(self, shape_id, lat, lon):
self.shape_id = self.translated_id(shape_id)
self.lat = lat
self.lon = lon
def to_str(self):
str1 = str(self.shape_id) + "," + str(self.lat) + "," + str(self.lon) + ","
str2 = str(self.order) + "," + str(self.dist_traveled)
return str1 + str2
def load_shapes():
stop_info = urllib2.urlopen(STOP_INFO_URL)
file = open('/Users/trevphil/Desktop/UC_GTFS/shapes.txt', 'w')
if (stop_info.getcode() == 200):
json_obj = json.load(stop_info)
shapes = []
for stop in json_obj:
shape_id = stop["RouteID"]
if not shape_id == 13: # Late Night (which has RouteID = 13) is not supported
for map_point in stop["MapPoints"]:
latitude = map_point["Latitude"]
longitude = map_point["Longitude"]
shape = ShapeItem(shape_id, latitude, longitude)
shapes.append(shape)
last_shape_id = -1
order = 1
base_lat = 999
base_lon = 999
for shape in shapes:
if not shape.shape_id == last_shape_id:
order = 1
last_shape_id = shape.shape_id
base_lat = shape.lat
base_lon = shape.lon
shape.dist_traveled = 0.0
else:
shape.dist_traveled = km_distance(base_lat, base_lon, shape.lat, shape.lon)
shape.order = order
order = order + 1
file.write(shape.to_str() + "\n")
|
{
"content_hash": "53c060fab0d0f3fcddda2dc50991bc0a",
"timestamp": "",
"source": "github",
"line_count": 156,
"max_line_length": 124,
"avg_line_length": 38.506410256410255,
"alnum_prop": 0.5493590810720825,
"repo_name": "foss-transportationmodeling/rettina-server",
"id": "0f83a377149baecd11dcc26c01ccf2dcb9d1175e",
"size": "6390",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "fetcher.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "2906488"
},
{
"name": "C++",
"bytes": "8032"
},
{
"name": "CSS",
"bytes": "24444"
},
{
"name": "HTML",
"bytes": "2120"
},
{
"name": "JavaScript",
"bytes": "25380"
},
{
"name": "Makefile",
"bytes": "288140"
},
{
"name": "Python",
"bytes": "43918939"
},
{
"name": "Shell",
"bytes": "73916"
}
],
"symlink_target": ""
}
|
import alsaaudio, time, audioop, argparse
from subprocess import call
parser = argparse.ArgumentParser(description='Adaptive Volume Application. Normalize with input from a Microphone')
parser.add_argument(
'--switch',
action='store_true',
help='Invert the action. Normalizing loud sounds Adapting down.'
)
parser.add_argument(
'--min',
type=int,
default=340,
help='Lower limit of the microphone eg: 340'
)
parser.add_argument(
'--max',
type=int,
default=4500,
help='Upper limit of the microphone eg: 3000'
)
parser.add_argument(
'--buffer',
type=int,
default=300,
help='Size of the buffer. The higher the number to smoother the transition. eg: 300'
)
# Get the arguments
args = parser.parse_args()
print args
if args.switch:
print "Inverting action"
# Open the device in nonblocking capture mode. The last argument could
# just as well have been zero for blocking mode. Then we could have
# left out the sleep call in the bottom of the loop
inp = alsaaudio.PCM(alsaaudio.PCM_CAPTURE,alsaaudio.PCM_NONBLOCK)
# Set attributes: Mono, 8000 Hz, 16 bit little endian samples
inp.setchannels(1)
inp.setrate(8000)
inp.setformat(alsaaudio.PCM_FORMAT_S16_LE)
# The period size controls the internal number of frames per period.
# The significance of this parameter is documented in the ALSA api.
# For our purposes, it is sufficient to know that reads from the device
# will return this many frames. Each frame being 2 bytes long.
# This means that the reads below will return either 320 bytes of data
# or 0 bytes of data. The latter is possible because we are in nonblocking
# mode.
inp.setperiodsize(160)
# General Settings
volume = currentVolume = 70
volumeBuffer = []
bufferSize = args.buffer
measure = 20
# Input volume specific (Mapping Input to Output)
# This is based on the measure
inputMin = args.min
inputMax = args.max
inputRate = ((inputMax - inputMin) / measure)
inputVolumes = range(inputMin, inputMax, inputRate)
# Switch the list
inputVolumes = inputVolumes[::-1]
# Output to vary 20% so the rate is a bit more set
outputVolumes = range(80, 100, 1)
# Switch the output mapping
if not args.switch:
outputVolumes = outputVolumes[::-1]
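# Worked example with the defaults (not in the original script): min=340,
# max=4500 and measure=20 give inputRate = (4500 - 340) / 20 = 208, so
# inputVolumes holds the 20 thresholds 340, 548, ..., 4292 (then reversed),
# and each bucket maps onto one of the 20 output volumes in 80..99.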
# Set initial volume
m = alsaaudio.Mixer('Mic', cardindex=1)
m.setvolume(volume)
while True:
# Read data from device
l,data = inp.read()
if l:
# Return the maximum of the absolute value of all samples in a fragment.
input = audioop.max(data, 2)
volumeBuffer.append(input)
# Fill the buffer up
if (len(volumeBuffer) >= bufferSize):
averageVolume = (sum(volumeBuffer)/len(volumeBuffer))
for i in range(len(inputVolumes)):
                if averageVolume >= int(inputVolumes[i]):
volume = int(outputVolumes[i])
break
if volume != currentVolume:
currentVolume = volume
m = alsaaudio.Mixer('Mic', cardindex=1)
m.setvolume(volume)
print "IN:" + format(input, '06d') + ", AVG: " + format(averageVolume, '06d') + " OUT:" + format(volume, '03d') + "%"
volumeBuffer.pop(0)
else:
print "BUFFERING: filling (",len(volumeBuffer),"/",bufferSize,") buffer"
time.sleep(.001)
|
{
"content_hash": "9c29153dc9a650cce2e96fca03051417",
"timestamp": "",
"source": "github",
"line_count": 112,
"max_line_length": 129,
"avg_line_length": 29.651785714285715,
"alnum_prop": 0.6753989762119843,
"repo_name": "mikeyy/pywakealexa",
"id": "0202052060a4ca192bb86d51384df7bd7b8e0cc2",
"size": "3599",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "adapt.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "57734"
}
],
"symlink_target": ""
}
|
"""
IPMI power manager driver.
Uses the 'ipmitool' command (http://ipmitool.sourceforge.net/) to remotely
manage hardware. This includes setting the boot device, getting a
serial-over-LAN console, and controlling the power state of the machine.
NOTE THAT CERTAIN DISTROS MAY INSTALL openipmi BY DEFAULT, INSTEAD OF ipmitool,
WHICH PROVIDES DIFFERENT COMMAND-LINE OPTIONS AND *IS NOT SUPPORTED* BY THIS
DRIVER.
"""
import contextlib
import os
import re
import subprocess
import tempfile
import time
from ironic_lib import utils as ironic_utils
from oslo_concurrency import processutils
from oslo_config import cfg
from oslo_log import log as logging
from oslo_service import loopingcall
from oslo_utils import excutils
from oslo_utils import strutils
import six
from ironic.common import boot_devices
from ironic.common import exception
from ironic.common.i18n import _
from ironic.common.i18n import _LE
from ironic.common.i18n import _LI
from ironic.common.i18n import _LW
from ironic.common import states
from ironic.common import utils
from ironic.conductor import task_manager
from ironic.drivers import base
from ironic.drivers.modules import console_utils
from ironic.drivers import utils as driver_utils
CONF = cfg.CONF
CONF.import_opt('retry_timeout',
'ironic.drivers.modules.ipminative',
group='ipmi')
CONF.import_opt('min_command_interval',
'ironic.drivers.modules.ipminative',
group='ipmi')
LOG = logging.getLogger(__name__)
VALID_PRIV_LEVELS = ['ADMINISTRATOR', 'CALLBACK', 'OPERATOR', 'USER']
VALID_PROTO_VERSIONS = ('2.0', '1.5')
REQUIRED_PROPERTIES = {
'ipmi_address': _("IP address or hostname of the node. Required.")
}
OPTIONAL_PROPERTIES = {
'ipmi_password': _("password. Optional."),
'ipmi_port': _("remote IPMI RMCP port. Optional."),
'ipmi_priv_level': _("privilege level; default is ADMINISTRATOR. One of "
"%s. Optional.") % ', '.join(VALID_PRIV_LEVELS),
'ipmi_username': _("username; default is NULL user. Optional."),
'ipmi_bridging': _("bridging_type; default is \"no\". One of \"single\", "
"\"dual\", \"no\". Optional."),
'ipmi_transit_channel': _("transit channel for bridged request. Required "
"only if ipmi_bridging is set to \"dual\"."),
'ipmi_transit_address': _("transit address for bridged request. Required "
"only if ipmi_bridging is set to \"dual\"."),
'ipmi_target_channel': _("destination channel for bridged request. "
"Required only if ipmi_bridging is set to "
"\"single\" or \"dual\"."),
'ipmi_target_address': _("destination address for bridged request. "
"Required only if ipmi_bridging is set "
"to \"single\" or \"dual\"."),
'ipmi_local_address': _("local IPMB address for bridged requests. "
"Used only if ipmi_bridging is set "
"to \"single\" or \"dual\". Optional."),
'ipmi_protocol_version': _('the version of the IPMI protocol; default '
'is "2.0". One of "1.5", "2.0". Optional.'),
'ipmi_force_boot_device': _("Whether Ironic should specify the boot "
"device to the BMC each time the server "
"is turned on, eg. because the BMC is not "
"capable of remembering the selected boot "
"device across power cycles; default value "
"is False. Optional.")
}
COMMON_PROPERTIES = REQUIRED_PROPERTIES.copy()
COMMON_PROPERTIES.update(OPTIONAL_PROPERTIES)
CONSOLE_PROPERTIES = {
'ipmi_terminal_port': _("node's UDP port to connect to. Only required for "
"console access.")
}
BRIDGING_OPTIONS = [('local_address', '-m'),
('transit_channel', '-B'), ('transit_address', '-T'),
('target_channel', '-b'), ('target_address', '-t')]
LAST_CMD_TIME = {}
TIMING_SUPPORT = None
SINGLE_BRIDGE_SUPPORT = None
DUAL_BRIDGE_SUPPORT = None
TMP_DIR_CHECKED = None
ipmitool_command_options = {
'timing': ['ipmitool', '-N', '0', '-R', '0', '-h'],
'single_bridge': ['ipmitool', '-m', '0', '-b', '0', '-t', '0', '-h'],
'dual_bridge': ['ipmitool', '-m', '0', '-b', '0', '-t', '0',
'-B', '0', '-T', '0', '-h']}
# Note(TheJulia): This string is hardcoded in ipmitool's lanplus driver
# and is substituted in return for the error code received from the IPMI
# controller. As of 1.8.15, no internationalization support appears to
# be in ipmitool which means the string should always be returned in this
# form regardless of locale.
IPMITOOL_RETRYABLE_FAILURES = ['insufficient resources for session']
def _check_option_support(options):
"""Checks if the specific ipmitool options are supported on host.
This method updates the module-level variables indicating whether
an option is supported so that it is accessible by any driver
interface class in this module. It is intended to be called from
the __init__ method of such classes only.
:param options: list of ipmitool options to be checked
:raises: OSError
"""
for opt in options:
if _is_option_supported(opt) is None:
try:
cmd = ipmitool_command_options[opt]
# NOTE(cinerama): use subprocess.check_call to
# check options & suppress ipmitool output to
# avoid alarming people
with open(os.devnull, 'wb') as nullfile:
subprocess.check_call(cmd, stdout=nullfile,
stderr=nullfile)
except subprocess.CalledProcessError:
LOG.info(_LI("Option %(opt)s is not supported by ipmitool"),
{'opt': opt})
_is_option_supported(opt, False)
else:
LOG.info(_LI("Option %(opt)s is supported by ipmitool"),
{'opt': opt})
_is_option_supported(opt, True)
def _is_option_supported(option, is_supported=None):
"""Indicates whether the particular ipmitool option is supported.
:param option: specific ipmitool option
:param is_supported: Optional Boolean. when specified, this value
is assigned to the module-level variable indicating
whether the option is supported. Used only if a value
is not already assigned.
:returns: True, indicates the option is supported
:returns: False, indicates the option is not supported
:returns: None, indicates that it is not aware whether the option
is supported
"""
global SINGLE_BRIDGE_SUPPORT
global DUAL_BRIDGE_SUPPORT
global TIMING_SUPPORT
if option == 'single_bridge':
if (SINGLE_BRIDGE_SUPPORT is None) and (is_supported is not None):
SINGLE_BRIDGE_SUPPORT = is_supported
return SINGLE_BRIDGE_SUPPORT
elif option == 'dual_bridge':
if (DUAL_BRIDGE_SUPPORT is None) and (is_supported is not None):
DUAL_BRIDGE_SUPPORT = is_supported
return DUAL_BRIDGE_SUPPORT
elif option == 'timing':
if (TIMING_SUPPORT is None) and (is_supported is not None):
TIMING_SUPPORT = is_supported
return TIMING_SUPPORT
def _console_pwfile_path(uuid):
"""Return the file path for storing the ipmi password for a console."""
file_name = "%(uuid)s.pw" % {'uuid': uuid}
return os.path.join(CONF.tempdir, file_name)
@contextlib.contextmanager
def _make_password_file(password):
"""Makes a temporary file that contains the password.
:param password: the password
:returns: the absolute pathname of the temporary file
:raises: PasswordFileFailedToCreate from creating or writing to the
temporary file
"""
f = None
try:
f = tempfile.NamedTemporaryFile(mode='w', dir=CONF.tempdir)
f.write(str(password))
f.flush()
except (IOError, OSError) as exc:
if f is not None:
f.close()
raise exception.PasswordFileFailedToCreate(error=exc)
except Exception:
with excutils.save_and_reraise_exception():
if f is not None:
f.close()
try:
# NOTE(jlvillal): This yield can not be in the try/except block above
# because an exception by the caller of this function would then get
# changed to a PasswordFileFailedToCreate exception which would mislead
# about the problem and its cause.
yield f.name
finally:
if f is not None:
f.close()
def _parse_driver_info(node):
"""Gets the parameters required for ipmitool to access the node.
:param node: the Node of interest.
:returns: dictionary of parameters.
:raises: InvalidParameterValue when an invalid value is specified
:raises: MissingParameterValue when a required ipmi parameter is missing.
"""
info = node.driver_info or {}
bridging_types = ['single', 'dual']
missing_info = [key for key in REQUIRED_PROPERTIES if not info.get(key)]
if missing_info:
raise exception.MissingParameterValue(_(
"Missing the following IPMI credentials in node's"
" driver_info: %s.") % missing_info)
address = info.get('ipmi_address')
username = info.get('ipmi_username')
password = six.text_type(info.get('ipmi_password', ''))
dest_port = info.get('ipmi_port')
port = info.get('ipmi_terminal_port')
priv_level = info.get('ipmi_priv_level', 'ADMINISTRATOR')
bridging_type = info.get('ipmi_bridging', 'no')
local_address = info.get('ipmi_local_address')
transit_channel = info.get('ipmi_transit_channel')
transit_address = info.get('ipmi_transit_address')
target_channel = info.get('ipmi_target_channel')
target_address = info.get('ipmi_target_address')
protocol_version = str(info.get('ipmi_protocol_version', '2.0'))
force_boot_device = info.get('ipmi_force_boot_device', False)
if not username:
LOG.warning(_LW('ipmi_username is not defined or empty for node %s: '
'NULL user will be utilized.') % node.uuid)
if not password:
LOG.warning(_LW('ipmi_password is not defined or empty for node %s: '
'NULL password will be utilized.') % node.uuid)
if protocol_version not in VALID_PROTO_VERSIONS:
valid_versions = ', '.join(VALID_PROTO_VERSIONS)
raise exception.InvalidParameterValue(_(
"Invalid IPMI protocol version value %(version)s, the valid "
"value can be one of %(valid_versions)s") %
{'version': protocol_version, 'valid_versions': valid_versions})
if port is not None:
port = utils.validate_network_port(port, 'ipmi_terminal_port')
if dest_port is not None:
dest_port = utils.validate_network_port(dest_port, 'ipmi_port')
# check if ipmi_bridging has proper value
if bridging_type == 'no':
# if bridging is not selected, then set all bridging params to None
(local_address, transit_channel, transit_address, target_channel,
target_address) = (None,) * 5
elif bridging_type in bridging_types:
# check if the particular bridging option is supported on host
if not _is_option_supported('%s_bridge' % bridging_type):
raise exception.InvalidParameterValue(_(
"Value for ipmi_bridging is provided as %s, but IPMI "
"bridging is not supported by the IPMI utility installed "
"on host. Ensure ipmitool version is > 1.8.11"
) % bridging_type)
# ensure that all the required parameters are provided
params_undefined = [param for param, value in [
("ipmi_target_channel", target_channel),
('ipmi_target_address', target_address)] if value is None]
if bridging_type == 'dual':
params_undefined2 = [param for param, value in [
("ipmi_transit_channel", transit_channel),
('ipmi_transit_address', transit_address)
] if value is None]
params_undefined.extend(params_undefined2)
else:
# if single bridging was selected, set dual bridge params to None
transit_channel = transit_address = None
# If the required parameters were not provided,
# raise an exception
if params_undefined:
raise exception.MissingParameterValue(_(
"%(param)s not provided") % {'param': params_undefined})
else:
raise exception.InvalidParameterValue(_(
"Invalid value for ipmi_bridging: %(bridging_type)s,"
" the valid value can be one of: %(bridging_types)s"
) % {'bridging_type': bridging_type,
'bridging_types': bridging_types + ['no']})
if priv_level not in VALID_PRIV_LEVELS:
valid_priv_lvls = ', '.join(VALID_PRIV_LEVELS)
raise exception.InvalidParameterValue(_(
"Invalid privilege level value:%(priv_level)s, the valid value"
" can be one of %(valid_levels)s") %
{'priv_level': priv_level, 'valid_levels': valid_priv_lvls})
return {
'address': address,
'dest_port': dest_port,
'username': username,
'password': password,
'port': port,
'uuid': node.uuid,
'priv_level': priv_level,
'local_address': local_address,
'transit_channel': transit_channel,
'transit_address': transit_address,
'target_channel': target_channel,
'target_address': target_address,
'protocol_version': protocol_version,
'force_boot_device': force_boot_device,
}
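# Hedged illustration (not in the original driver): a minimal driver_info
# accepted by _parse_driver_info, with made-up values:
#   {'ipmi_address': '10.0.0.5',
#    'ipmi_username': 'admin',
#    'ipmi_password': 'secret'}
# All other fields fall back to the defaults documented above
# (ADMINISTRATOR privilege level, protocol version 2.0, no bridging).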
def _exec_ipmitool(driver_info, command):
"""Execute the ipmitool command.
:param driver_info: the ipmitool parameters for accessing a node.
:param command: the ipmitool command to be executed.
:returns: (stdout, stderr) from executing the command.
:raises: PasswordFileFailedToCreate from creating or writing to the
temporary file.
:raises: processutils.ProcessExecutionError from executing the command.
"""
ipmi_version = ('lanplus'
if driver_info['protocol_version'] == '2.0'
else 'lan')
args = ['ipmitool',
'-I',
ipmi_version,
'-H',
driver_info['address'],
'-L', driver_info['priv_level']
]
if driver_info['dest_port']:
args.append('-p')
args.append(driver_info['dest_port'])
if driver_info['username']:
args.append('-U')
args.append(driver_info['username'])
for name, option in BRIDGING_OPTIONS:
if driver_info[name] is not None:
args.append(option)
args.append(driver_info[name])
# specify retry timing more precisely, if supported
num_tries = max(
(CONF.ipmi.retry_timeout // CONF.ipmi.min_command_interval), 1)
if _is_option_supported('timing'):
args.append('-R')
args.append(str(num_tries))
args.append('-N')
args.append(str(CONF.ipmi.min_command_interval))
end_time = (time.time() + CONF.ipmi.retry_timeout)
while True:
num_tries = num_tries - 1
# NOTE(deva): ensure that no communications are sent to a BMC more
# often than once every min_command_interval seconds.
time_till_next_poll = CONF.ipmi.min_command_interval - (
time.time() - LAST_CMD_TIME.get(driver_info['address'], 0))
if time_till_next_poll > 0:
time.sleep(time_till_next_poll)
        # Reset the working list on each iteration so that the password file
        # arguments appended by a previous attempt are not carried over.
cmd_args = args[:]
# 'ipmitool' command will prompt password if there is no '-f'
# option, we set it to '\0' to write a password file to support
# empty password
with _make_password_file(driver_info['password'] or '\0') as pw_file:
cmd_args.append('-f')
cmd_args.append(pw_file)
cmd_args.extend(command.split(" "))
try:
out, err = utils.execute(*cmd_args)
return out, err
except processutils.ProcessExecutionError as e:
with excutils.save_and_reraise_exception() as ctxt:
err_list = [x for x in IPMITOOL_RETRYABLE_FAILURES
if x in six.text_type(e)]
if ((time.time() > end_time) or
(num_tries == 0) or
not err_list):
LOG.error(_LE('IPMI Error while attempting "%(cmd)s"'
'for node %(node)s. Error: %(error)s'), {
'node': driver_info['uuid'],
'cmd': e.cmd, 'error': e
})
else:
ctxt.reraise = False
LOG.warning(_LW('IPMI Error encountered, retrying '
'"%(cmd)s" for node %(node)s. '
'Error: %(error)s'), {
'node': driver_info['uuid'],
'cmd': e.cmd, 'error': e
})
finally:
LAST_CMD_TIME[driver_info['address']] = time.time()
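# Hedged illustration (not in the original driver): for a node with
# ipmi_address 10.0.0.5, an 'admin' user and the defaults above, the
# assembled command for a power status check resembles:
#   ipmitool -I lanplus -H 10.0.0.5 -L ADMINISTRATOR -U admin \
#       -R <retries> -N <interval> -f /tmp/<password file> power status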
def _sleep_time(iter):
"""Return the time-to-sleep for the n'th iteration of a retry loop.
This implementation increases exponentially.
:param iter: iteration number
:returns: number of seconds to sleep
"""
if iter <= 1:
return 1
return iter ** 2
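# With _set_and_wait() starting 'iter' at -1, the successive sleeps come out
# as 1, 1, 4, 9, 16, ... seconds (iteration numbers 0, 1, 2, 3, 4, ...).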
def _set_and_wait(target_state, driver_info):
"""Helper function for DynamicLoopingCall.
    This method changes the power state and polls the BMC until the desired
power state is reached, or CONF.ipmi.retry_timeout would be exceeded by the
next iteration.
This method assumes the caller knows the current power state and does not
check it prior to changing the power state. Most BMCs should be fine, but
if a driver is concerned, the state should be checked prior to calling this
method.
:param target_state: desired power state
:param driver_info: the ipmitool parameters for accessing a node.
:returns: one of ironic.common.states
"""
if target_state == states.POWER_ON:
state_name = "on"
elif target_state == states.POWER_OFF:
state_name = "off"
def _wait(mutable):
try:
# Only issue power change command once
if mutable['iter'] < 0:
_exec_ipmitool(driver_info, "power %s" % state_name)
else:
mutable['power'] = _power_status(driver_info)
except (exception.PasswordFileFailedToCreate,
processutils.ProcessExecutionError,
exception.IPMIFailure):
# Log failures but keep trying
LOG.warning(_LW("IPMI power %(state)s failed for node %(node)s."),
{'state': state_name, 'node': driver_info['uuid']})
finally:
mutable['iter'] += 1
if mutable['power'] == target_state:
raise loopingcall.LoopingCallDone()
sleep_time = _sleep_time(mutable['iter'])
if (sleep_time + mutable['total_time']) > CONF.ipmi.retry_timeout:
# Stop if the next loop would exceed maximum retry_timeout
LOG.error(_LE('IPMI power %(state)s timed out after '
'%(tries)s retries on node %(node_id)s.'),
{'state': state_name, 'tries': mutable['iter'],
'node_id': driver_info['uuid']})
mutable['power'] = states.ERROR
raise loopingcall.LoopingCallDone()
else:
mutable['total_time'] += sleep_time
return sleep_time
# Use mutable objects so the looped method can change them.
# Start 'iter' from -1 so that the first two checks are one second apart.
status = {'power': None, 'iter': -1, 'total_time': 0}
timer = loopingcall.DynamicLoopingCall(_wait, status)
timer.start().wait()
return status['power']
def _power_on(driver_info):
"""Turn the power ON for this node.
:param driver_info: the ipmitool parameters for accessing a node.
:returns: one of ironic.common.states POWER_ON or ERROR.
:raises: IPMIFailure on an error from ipmitool (from _power_status call).
"""
return _set_and_wait(states.POWER_ON, driver_info)
def _power_off(driver_info):
"""Turn the power OFF for this node.
:param driver_info: the ipmitool parameters for accessing a node.
:returns: one of ironic.common.states POWER_OFF or ERROR.
:raises: IPMIFailure on an error from ipmitool (from _power_status call).
"""
return _set_and_wait(states.POWER_OFF, driver_info)
def _power_status(driver_info):
"""Get the power status for a node.
:param driver_info: the ipmitool access parameters for a node.
:returns: one of ironic.common.states POWER_OFF, POWER_ON or ERROR.
:raises: IPMIFailure on an error from ipmitool.
"""
cmd = "power status"
try:
out_err = _exec_ipmitool(driver_info, cmd)
except (exception.PasswordFileFailedToCreate,
processutils.ProcessExecutionError) as e:
LOG.warning(_LW("IPMI power status failed for node %(node_id)s with "
"error: %(error)s."),
{'node_id': driver_info['uuid'], 'error': e})
raise exception.IPMIFailure(cmd=cmd)
if out_err[0] == "Chassis Power is on\n":
return states.POWER_ON
elif out_err[0] == "Chassis Power is off\n":
return states.POWER_OFF
else:
return states.ERROR
def _process_sensor(sensor_data):
sensor_data_fields = sensor_data.split('\n')
sensor_data_dict = {}
for field in sensor_data_fields:
if not field:
continue
kv_value = field.split(':')
if len(kv_value) != 2:
continue
sensor_data_dict[kv_value[0].strip()] = kv_value[1].strip()
return sensor_data_dict
def _get_sensor_type(node, sensor_data_dict):
# Have only three sensor type name IDs: 'Sensor Type (Analog)'
# 'Sensor Type (Discrete)' and 'Sensor Type (Threshold)'
for key in ('Sensor Type (Analog)', 'Sensor Type (Discrete)',
'Sensor Type (Threshold)'):
try:
return sensor_data_dict[key].split(' ', 1)[0]
except KeyError:
continue
    raise exception.FailedToParseSensorData(
        node=node.uuid,
        error=(_("parse ipmi sensor data failed, unknown sensor type"
                 " data: %(sensors_data)s")
               % {'sensors_data': sensor_data_dict}))
def _parse_ipmi_sensors_data(node, sensors_data):
"""Parse the IPMI sensors data and format to the dict grouping by type.
We run 'ipmitool' command with 'sdr -v' options, which can return sensor
details in human-readable format, we need to format them to JSON string
dict-based data for Ceilometer Collector which can be sent it as payload
out via notification bus and consumed by Ceilometer Collector.
:param sensors_data: the sensor data returned by ipmitool command.
:returns: the sensor data with JSON format, grouped by sensor type.
:raises: FailedToParseSensorData when error encountered during parsing.
"""
sensors_data_dict = {}
if not sensors_data:
return sensors_data_dict
sensors_data_array = sensors_data.split('\n\n')
for sensor_data in sensors_data_array:
sensor_data_dict = _process_sensor(sensor_data)
if not sensor_data_dict:
continue
sensor_type = _get_sensor_type(node, sensor_data_dict)
# ignore the sensors which has no current 'Sensor Reading' data
if 'Sensor Reading' in sensor_data_dict:
sensors_data_dict.setdefault(
sensor_type,
{})[sensor_data_dict['Sensor ID']] = sensor_data_dict
# get nothing, no valid sensor data
if not sensors_data_dict:
raise exception.FailedToParseSensorData(
node=node.uuid,
error=(_("parse ipmi sensor data failed, get nothing with input"
" data: %(sensors_data)s")
% {'sensors_data': sensors_data}))
return sensors_data_dict
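# Hedged illustration (not in the original driver): a single 'ipmitool sdr -v'
# block such as
#   Sensor ID              : Temp (0x1)
#   Sensor Type (Threshold): Temperature
#   Sensor Reading         : 30 (+/- 1) degrees C
# ends up grouped as {'Temperature': {'Temp (0x1)': {...}}}.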
@task_manager.require_exclusive_lock
def send_raw(task, raw_bytes):
"""Send raw bytes to the BMC. Bytes should be a string of bytes.
:param task: a TaskManager instance.
:param raw_bytes: a string of raw bytes to send, e.g. '0x00 0x01'
:returns: a tuple with stdout and stderr.
:raises: IPMIFailure on an error from ipmitool.
:raises: MissingParameterValue if a required parameter is missing.
:raises: InvalidParameterValue when an invalid value is specified.
"""
node_uuid = task.node.uuid
LOG.debug('Sending node %(node)s raw bytes %(bytes)s',
{'bytes': raw_bytes, 'node': node_uuid})
driver_info = _parse_driver_info(task.node)
cmd = 'raw %s' % raw_bytes
try:
out, err = _exec_ipmitool(driver_info, cmd)
LOG.debug('send raw bytes returned stdout: %(stdout)s, stderr:'
' %(stderr)s', {'stdout': out, 'stderr': err})
except (exception.PasswordFileFailedToCreate,
processutils.ProcessExecutionError) as e:
LOG.exception(_LE('IPMI "raw bytes" failed for node %(node_id)s '
'with error: %(error)s.'),
{'node_id': node_uuid, 'error': e})
raise exception.IPMIFailure(cmd=cmd)
return out, err
def dump_sdr(task, file_path):
"""Dump SDR data to a file.
:param task: a TaskManager instance.
:param file_path: the path to SDR dump file.
:raises: IPMIFailure on an error from ipmitool.
:raises: MissingParameterValue if a required parameter is missing.
:raises: InvalidParameterValue when an invalid value is specified.
"""
node_uuid = task.node.uuid
LOG.debug('Dump SDR data for node %(node)s to file %(name)s',
{'name': file_path, 'node': node_uuid})
driver_info = _parse_driver_info(task.node)
cmd = 'sdr dump %s' % file_path
try:
out, err = _exec_ipmitool(driver_info, cmd)
LOG.debug('dump SDR returned stdout: %(stdout)s, stderr:'
' %(stderr)s', {'stdout': out, 'stderr': err})
except (exception.PasswordFileFailedToCreate,
processutils.ProcessExecutionError) as e:
LOG.exception(_LE('IPMI "sdr dump" failed for node %(node_id)s '
'with error: %(error)s.'),
{'node_id': node_uuid, 'error': e})
raise exception.IPMIFailure(cmd=cmd)
def _check_temp_dir():
"""Check for Valid temp directory."""
global TMP_DIR_CHECKED
# because a temporary file is used to pass the password to ipmitool,
# we should check the directory
if TMP_DIR_CHECKED is None:
try:
utils.check_dir()
except (exception.PathNotFound,
exception.DirectoryNotWritable,
exception.InsufficientDiskSpace) as e:
with excutils.save_and_reraise_exception():
TMP_DIR_CHECKED = False
err_msg = (_("Ipmitool drivers need to be able to create "
"temporary files to pass password to ipmitool. "
"Encountered error: %s") % e)
e.message = err_msg
LOG.error(err_msg)
else:
TMP_DIR_CHECKED = True
class IPMIPower(base.PowerInterface):
def __init__(self):
try:
_check_option_support(['timing', 'single_bridge', 'dual_bridge'])
except OSError:
raise exception.DriverLoadError(
driver=self.__class__.__name__,
reason=_("Unable to locate usable ipmitool command in "
"the system path when checking ipmitool version"))
_check_temp_dir()
def get_properties(self):
return COMMON_PROPERTIES
def validate(self, task):
"""Validate driver_info for ipmitool driver.
Check that node['driver_info'] contains IPMI credentials.
:param task: a TaskManager instance containing the node to act on.
:raises: InvalidParameterValue if required ipmi parameters are missing.
:raises: MissingParameterValue if a required parameter is missing.
"""
_parse_driver_info(task.node)
# NOTE(deva): don't actually touch the BMC in validate because it is
# called too often, and BMCs are too fragile.
# This is a temporary measure to mitigate problems while
# 1314954 and 1314961 are resolved.
def get_power_state(self, task):
"""Get the current power state of the task's node.
:param task: a TaskManager instance containing the node to act on.
:returns: one of ironic.common.states POWER_OFF, POWER_ON or ERROR.
:raises: InvalidParameterValue if required ipmi parameters are missing.
:raises: MissingParameterValue if a required parameter is missing.
:raises: IPMIFailure on an error from ipmitool (from _power_status
call).
"""
driver_info = _parse_driver_info(task.node)
return _power_status(driver_info)
@task_manager.require_exclusive_lock
def set_power_state(self, task, pstate):
"""Turn the power on or off.
:param task: a TaskManager instance containing the node to act on.
:param pstate: The desired power state, one of ironic.common.states
POWER_ON, POWER_OFF.
:raises: InvalidParameterValue if an invalid power state was specified.
:raises: MissingParameterValue if required ipmi parameters are missing
:raises: PowerStateFailure if the power couldn't be set to pstate.
"""
driver_info = _parse_driver_info(task.node)
if pstate == states.POWER_ON:
driver_utils.ensure_next_boot_device(task, driver_info)
state = _power_on(driver_info)
elif pstate == states.POWER_OFF:
state = _power_off(driver_info)
else:
raise exception.InvalidParameterValue(
_("set_power_state called "
"with invalid power state %s.") % pstate)
if state != pstate:
raise exception.PowerStateFailure(pstate=pstate)
@task_manager.require_exclusive_lock
def reboot(self, task):
"""Cycles the power to the task's node.
:param task: a TaskManager instance containing the node to act on.
:raises: MissingParameterValue if required ipmi parameters are missing.
:raises: InvalidParameterValue if an invalid power state was specified.
:raises: PowerStateFailure if the final state of the node is not
POWER_ON.
"""
driver_info = _parse_driver_info(task.node)
_power_off(driver_info)
driver_utils.ensure_next_boot_device(task, driver_info)
state = _power_on(driver_info)
if state != states.POWER_ON:
raise exception.PowerStateFailure(pstate=states.POWER_ON)
class IPMIManagement(base.ManagementInterface):
def get_properties(self):
return COMMON_PROPERTIES
def __init__(self):
try:
_check_option_support(['timing', 'single_bridge', 'dual_bridge'])
except OSError:
raise exception.DriverLoadError(
driver=self.__class__.__name__,
reason=_("Unable to locate usable ipmitool command in "
"the system path when checking ipmitool version"))
_check_temp_dir()
def validate(self, task):
"""Check that 'driver_info' contains IPMI credentials.
Validates whether the 'driver_info' property of the supplied
task's node contains the required credentials information.
:param task: a task from TaskManager.
:raises: InvalidParameterValue if required IPMI parameters
are missing.
:raises: MissingParameterValue if a required parameter is missing.
"""
_parse_driver_info(task.node)
def get_supported_boot_devices(self, task):
"""Get a list of the supported boot devices.
:param task: a task from TaskManager.
:returns: A list with the supported boot devices defined
in :mod:`ironic.common.boot_devices`.
"""
return [boot_devices.PXE, boot_devices.DISK, boot_devices.CDROM,
boot_devices.BIOS, boot_devices.SAFE]
@task_manager.require_exclusive_lock
def set_boot_device(self, task, device, persistent=False):
"""Set the boot device for the task's node.
Set the boot device to use on next reboot of the node.
:param task: a task from TaskManager.
:param device: the boot device, one of
:mod:`ironic.common.boot_devices`.
:param persistent: Boolean value. True if the boot device will
persist to all future boots, False if not.
Default: False.
:raises: InvalidParameterValue if an invalid boot device is specified
:raises: MissingParameterValue if required ipmi parameters are missing.
:raises: IPMIFailure on an error from ipmitool.
"""
if device not in self.get_supported_boot_devices(task):
raise exception.InvalidParameterValue(_(
"Invalid boot device %s specified.") % device)
# note(JayF): IPMI spec indicates unless you send these raw bytes the
# boot device setting times out after 60s. Since it's possible it
# could be >60s before a node is rebooted, we should always send them.
# This mimics pyghmi's current behavior, and the "option=timeout"
# setting on newer ipmitool binaries.
timeout_disable = "0x00 0x08 0x03 0x08"
send_raw(task, timeout_disable)
if task.node.driver_info.get('ipmi_force_boot_device', False):
driver_utils.force_persistent_boot(task,
device,
persistent)
# Reset persistent to False, in case of BMC does not support
# persistent or we do not have admin rights.
persistent = False
cmd = "chassis bootdev %s" % device
if persistent:
cmd = cmd + " options=persistent"
driver_info = _parse_driver_info(task.node)
try:
out, err = _exec_ipmitool(driver_info, cmd)
except (exception.PasswordFileFailedToCreate,
processutils.ProcessExecutionError) as e:
LOG.warning(_LW('IPMI set boot device failed for node %(node)s '
'when executing "ipmitool %(cmd)s". '
'Error: %(error)s'),
{'node': driver_info['uuid'], 'cmd': cmd, 'error': e})
raise exception.IPMIFailure(cmd=cmd)
def get_boot_device(self, task):
"""Get the current boot device for the task's node.
Returns the current boot device of the node.
:param task: a task from TaskManager.
:raises: InvalidParameterValue if required IPMI parameters
are missing.
:raises: IPMIFailure on an error from ipmitool.
:raises: MissingParameterValue if a required parameter is missing.
:returns: a dictionary containing:
:boot_device: the boot device, one of
:mod:`ironic.common.boot_devices` or None if it is unknown.
:persistent: Whether the boot device will persist to all
future boots or not, None if it is unknown.
"""
driver_info = task.node.driver_info
driver_internal_info = task.node.driver_internal_info
if (driver_info.get('ipmi_force_boot_device', False) and
driver_internal_info.get('persistent_boot_device') and
driver_internal_info.get('is_next_boot_persistent', True)):
return {
'boot_device': driver_internal_info['persistent_boot_device'],
'persistent': True
}
cmd = "chassis bootparam get 5"
driver_info = _parse_driver_info(task.node)
response = {'boot_device': None, 'persistent': None}
try:
out, err = _exec_ipmitool(driver_info, cmd)
except (exception.PasswordFileFailedToCreate,
processutils.ProcessExecutionError) as e:
LOG.warning(_LW('IPMI get boot device failed for node %(node)s '
'when executing "ipmitool %(cmd)s". '
'Error: %(error)s'),
{'node': driver_info['uuid'], 'cmd': cmd, 'error': e})
raise exception.IPMIFailure(cmd=cmd)
re_obj = re.search('Boot Device Selector : (.+)?\n', out)
if re_obj:
boot_selector = re_obj.groups('')[0]
if 'PXE' in boot_selector:
response['boot_device'] = boot_devices.PXE
elif 'Hard-Drive' in boot_selector:
if 'Safe-Mode' in boot_selector:
response['boot_device'] = boot_devices.SAFE
else:
response['boot_device'] = boot_devices.DISK
elif 'BIOS' in boot_selector:
response['boot_device'] = boot_devices.BIOS
elif 'CD/DVD' in boot_selector:
response['boot_device'] = boot_devices.CDROM
response['persistent'] = 'Options apply to all future boots' in out
return response
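    # Example output: on many BMCs, "ipmitool chassis bootparam get 5"
    # prints lines roughly like the sample below (exact wording varies by
    # vendor and firmware, so treat this as an illustration). The regex
    # above keys off "Boot Device Selector" and the persistence check keys
    # off "Options apply to all future boots":
    #
    #   Boot parameter version: 1
    #   Boot parameter data: e010000000
    #    Boot Flag Valid
    #    Options apply to all future boots
    #    Boot Device Selector : Force PXE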
def get_sensors_data(self, task):
"""Get sensors data.
:param task: a TaskManager instance.
:raises: FailedToGetSensorData when getting the sensor data fails.
:raises: FailedToParseSensorData when parsing sensor data fails.
:raises: InvalidParameterValue if required ipmi parameters are missing
:raises: MissingParameterValue if a required parameter is missing.
:returns: returns a dict of sensor data group by sensor type.
"""
driver_info = _parse_driver_info(task.node)
        # With the '-v' option, we can get the entire sensor data,
        # including the extended sensor information
cmd = "sdr -v"
try:
out, err = _exec_ipmitool(driver_info, cmd)
except (exception.PasswordFileFailedToCreate,
processutils.ProcessExecutionError) as e:
raise exception.FailedToGetSensorData(node=task.node.uuid,
error=e)
return _parse_ipmi_sensors_data(task.node, out)
class VendorPassthru(base.VendorInterface):
def __init__(self):
try:
_check_option_support(['single_bridge', 'dual_bridge'])
except OSError:
raise exception.DriverLoadError(
driver=self.__class__.__name__,
reason=_("Unable to locate usable ipmitool command in "
"the system path when checking ipmitool version"))
_check_temp_dir()
@base.passthru(['POST'])
@task_manager.require_exclusive_lock
def send_raw(self, task, http_method, raw_bytes):
"""Send raw bytes to the BMC. Bytes should be a string of bytes.
:param task: a TaskManager instance.
:param http_method: the HTTP method used on the request.
:param raw_bytes: a string of raw bytes to send, e.g. '0x00 0x01'
:raises: IPMIFailure on an error from ipmitool.
:raises: MissingParameterValue if a required parameter is missing.
:raises: InvalidParameterValue when an invalid value is specified.
"""
send_raw(task, raw_bytes)
@base.passthru(['POST'])
@task_manager.require_exclusive_lock
def bmc_reset(self, task, http_method, warm=True):
"""Reset BMC with IPMI command 'bmc reset (warm|cold)'.
:param task: a TaskManager instance.
:param http_method: the HTTP method used on the request.
:param warm: boolean parameter to decide on warm or cold reset.
:raises: IPMIFailure on an error from ipmitool.
:raises: MissingParameterValue if a required parameter is missing.
:raises: InvalidParameterValue when an invalid value is specified
"""
node_uuid = task.node.uuid
warm = strutils.bool_from_string(warm)
if warm:
warm_param = 'warm'
else:
warm_param = 'cold'
LOG.debug('Doing %(warm)s BMC reset on node %(node)s',
{'warm': warm_param, 'node': node_uuid})
driver_info = _parse_driver_info(task.node)
cmd = 'bmc reset %s' % warm_param
try:
out, err = _exec_ipmitool(driver_info, cmd)
LOG.debug('bmc reset returned stdout: %(stdout)s, stderr:'
' %(stderr)s', {'stdout': out, 'stderr': err})
except (exception.PasswordFileFailedToCreate,
processutils.ProcessExecutionError) as e:
LOG.exception(_LE('IPMI "bmc reset" failed for node %(node_id)s '
'with error: %(error)s.'),
{'node_id': node_uuid, 'error': e})
raise exception.IPMIFailure(cmd=cmd)
def get_properties(self):
return COMMON_PROPERTIES
def validate(self, task, method, **kwargs):
"""Validate vendor-specific actions.
If invalid, raises an exception; otherwise returns None.
Valid methods:
* send_raw
* bmc_reset
:param task: a task from TaskManager.
:param method: method to be validated
:param kwargs: info for action.
:raises: InvalidParameterValue when an invalid parameter value is
specified.
:raises: MissingParameterValue if a required parameter is missing.
"""
if method == 'send_raw':
if not kwargs.get('raw_bytes'):
raise exception.MissingParameterValue(_(
'Parameter raw_bytes (string of bytes) was not '
'specified.'))
_parse_driver_info(task.node)
class IPMIShellinaboxConsole(base.ConsoleInterface):
"""A ConsoleInterface that uses ipmitool and shellinabox."""
def __init__(self):
try:
_check_option_support(['timing', 'single_bridge', 'dual_bridge'])
except OSError:
raise exception.DriverLoadError(
driver=self.__class__.__name__,
reason=_("Unable to locate usable ipmitool command in "
"the system path when checking ipmitool version"))
_check_temp_dir()
def get_properties(self):
d = COMMON_PROPERTIES.copy()
d.update(CONSOLE_PROPERTIES)
return d
def validate(self, task):
"""Validate the Node console info.
:param task: a task from TaskManager.
:raises: InvalidParameterValue
:raises: MissingParameterValue when a required parameter is missing
"""
driver_info = _parse_driver_info(task.node)
if not driver_info['port']:
raise exception.MissingParameterValue(_(
"Missing 'ipmi_terminal_port' parameter in node's"
" driver_info."))
if driver_info['protocol_version'] != '2.0':
raise exception.InvalidParameterValue(_(
"Serial over lan only works with IPMI protocol version 2.0. "
"Check the 'ipmi_protocol_version' parameter in "
"node's driver_info"))
def start_console(self, task):
"""Start a remote console for the node.
:param task: a task from TaskManager
:raises: InvalidParameterValue if required ipmi parameters are missing
:raises: PasswordFileFailedToCreate if unable to create a file
containing the password
:raises: ConsoleError if the directory for the PID file cannot be
created
:raises: ConsoleSubprocessFailed when invoking the subprocess failed
"""
driver_info = _parse_driver_info(task.node)
path = _console_pwfile_path(driver_info['uuid'])
pw_file = console_utils.make_persistent_password_file(
path, driver_info['password'] or '\0')
ipmi_cmd = ("/:%(uid)s:%(gid)s:HOME:ipmitool -H %(address)s"
" -I lanplus -U %(user)s -f %(pwfile)s"
% {'uid': os.getuid(),
'gid': os.getgid(),
'address': driver_info['address'],
'user': driver_info['username'],
'pwfile': pw_file})
for name, option in BRIDGING_OPTIONS:
if driver_info[name] is not None:
ipmi_cmd = " ".join([ipmi_cmd,
option, driver_info[name]])
if CONF.debug:
ipmi_cmd += " -v"
ipmi_cmd += " sol activate"
try:
console_utils.start_shellinabox_console(driver_info['uuid'],
driver_info['port'],
ipmi_cmd)
except (exception.ConsoleError, exception.ConsoleSubprocessFailed):
with excutils.save_and_reraise_exception():
ironic_utils.unlink_without_raise(path)
def stop_console(self, task):
"""Stop the remote console session for the node.
:param task: a task from TaskManager
:raises: ConsoleError if unable to stop the console
"""
try:
console_utils.stop_shellinabox_console(task.node.uuid)
finally:
ironic_utils.unlink_without_raise(
_console_pwfile_path(task.node.uuid))
def get_console(self, task):
"""Get the type and connection information about the console."""
driver_info = _parse_driver_info(task.node)
url = console_utils.get_shellinabox_console_url(driver_info['port'])
return {'type': 'shellinabox', 'url': url}
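# Example (a minimal sketch, assuming a TaskManager `task` and an instance
# `management` of the management interface defined above; neither is
# constructed here, so this illustrates the call pattern rather than being
# runnable standalone):
#
#   from ironic.common import boot_devices
#
#   def ensure_pxe_boot(task, management):
#       # Query the BMC first, and only rewrite the boot flag when needed.
#       current = management.get_boot_device(task)
#       if current['boot_device'] != boot_devices.PXE:
#           management.set_boot_device(task, boot_devices.PXE,
#                                      persistent=False)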
|
{
"content_hash": "c24324d39313506b02192292f4ae2714",
"timestamp": "",
"source": "github",
"line_count": 1169,
"max_line_length": 79,
"avg_line_length": 40.09923011120616,
"alnum_prop": 0.5978112466934039,
"repo_name": "devananda/ironic",
"id": "cef19d25b97b1bc2a397dd9ff3cbe455d3b114b0",
"size": "47648",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "ironic/drivers/modules/ipmitool.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "3354566"
}
],
"symlink_target": ""
}
|
import uuid
import mock
from oslo_utils import timeutils
import six
from six.moves.urllib import parse as urlparse
from testtools import matchers
from keystoneclient import session
from keystoneclient.tests.unit.v3 import client_fixtures
from keystoneclient.tests.unit.v3 import utils
from keystoneclient.v3.contrib.oauth1 import access_tokens
from keystoneclient.v3.contrib.oauth1 import auth
from keystoneclient.v3.contrib.oauth1 import consumers
from keystoneclient.v3.contrib.oauth1 import request_tokens
try:
from oauthlib import oauth1
except ImportError:
oauth1 = None
class BaseTest(utils.TestCase):
def setUp(self):
super(BaseTest, self).setUp()
if oauth1 is None:
self.skipTest('oauthlib package not available')
class ConsumerTests(BaseTest, utils.CrudTests):
def setUp(self):
super(ConsumerTests, self).setUp()
self.key = 'consumer'
self.collection_key = 'consumers'
self.model = consumers.Consumer
self.manager = self.client.oauth1.consumers
self.path_prefix = 'OS-OAUTH1'
def new_ref(self, **kwargs):
kwargs = super(ConsumerTests, self).new_ref(**kwargs)
kwargs.setdefault('description', uuid.uuid4().hex)
return kwargs
def test_description_is_optional(self):
consumer_id = uuid.uuid4().hex
resp_ref = {'consumer': {'description': None,
'id': consumer_id}}
self.stub_url('POST',
[self.path_prefix, self.collection_key],
status_code=201, json=resp_ref)
consumer = self.manager.create()
self.assertEqual(consumer_id, consumer.id)
self.assertIsNone(consumer.description)
def test_description_not_included(self):
consumer_id = uuid.uuid4().hex
resp_ref = {'consumer': {'id': consumer_id}}
self.stub_url('POST',
[self.path_prefix, self.collection_key],
status_code=201, json=resp_ref)
consumer = self.manager.create()
self.assertEqual(consumer_id, consumer.id)
class TokenTests(BaseTest):
def _new_oauth_token(self):
key = uuid.uuid4().hex
secret = uuid.uuid4().hex
params = {'oauth_token': key, 'oauth_token_secret': secret}
token = urlparse.urlencode(params)
return (key, secret, token)
def _new_oauth_token_with_expires_at(self):
key, secret, token = self._new_oauth_token()
expires_at = timeutils.strtime()
params = {'oauth_token': key,
'oauth_token_secret': secret,
'oauth_expires_at': expires_at}
token = urlparse.urlencode(params)
return (key, secret, expires_at, token)
def _validate_oauth_headers(self, auth_header, oauth_client):
"""Assert that the data in the headers matches the data
that is produced from oauthlib.
"""
self.assertThat(auth_header, matchers.StartsWith('OAuth '))
parameters = dict(
oauth1.rfc5849.utils.parse_authorization_header(auth_header))
self.assertEqual('HMAC-SHA1', parameters['oauth_signature_method'])
self.assertEqual('1.0', parameters['oauth_version'])
self.assertIsInstance(parameters['oauth_nonce'], six.string_types)
self.assertEqual(oauth_client.client_key,
parameters['oauth_consumer_key'])
if oauth_client.resource_owner_key:
self.assertEqual(oauth_client.resource_owner_key,
                             parameters['oauth_token'])
if oauth_client.verifier:
self.assertEqual(oauth_client.verifier,
parameters['oauth_verifier'])
if oauth_client.callback_uri:
self.assertEqual(oauth_client.callback_uri,
parameters['oauth_callback'])
return parameters
class RequestTokenTests(TokenTests):
def setUp(self):
super(RequestTokenTests, self).setUp()
self.model = request_tokens.RequestToken
self.manager = self.client.oauth1.request_tokens
self.path_prefix = 'OS-OAUTH1'
def test_authorize_request_token(self):
request_key = uuid.uuid4().hex
info = {'id': request_key,
'key': request_key,
'secret': uuid.uuid4().hex}
request_token = request_tokens.RequestToken(self.manager, info)
verifier = uuid.uuid4().hex
resp_ref = {'token': {'oauth_verifier': verifier}}
self.stub_url('PUT',
[self.path_prefix, 'authorize', request_key],
status_code=200, json=resp_ref)
# Assert the manager is returning the expected data
role_id = uuid.uuid4().hex
token = request_token.authorize([role_id])
self.assertEqual(verifier, token.oauth_verifier)
# Assert that the request was sent in the expected structure
exp_body = {'roles': [{'id': role_id}]}
self.assertRequestBodyIs(json=exp_body)
def test_create_request_token(self):
project_id = uuid.uuid4().hex
consumer_key = uuid.uuid4().hex
consumer_secret = uuid.uuid4().hex
request_key, request_secret, resp_ref = self._new_oauth_token()
headers = {'Content-Type': 'application/x-www-form-urlencoded'}
self.stub_url('POST', [self.path_prefix, 'request_token'],
status_code=201, text=resp_ref, headers=headers)
# Assert the manager is returning request token object
request_token = self.manager.create(consumer_key, consumer_secret,
project_id)
self.assertIsInstance(request_token, self.model)
self.assertEqual(request_key, request_token.key)
self.assertEqual(request_secret, request_token.secret)
# Assert that the project id is in the header
self.assertRequestHeaderEqual('requested-project-id', project_id)
req_headers = self.requests_mock.last_request.headers
oauth_client = oauth1.Client(consumer_key,
client_secret=consumer_secret,
signature_method=oauth1.SIGNATURE_HMAC,
callback_uri="oob")
self._validate_oauth_headers(req_headers['Authorization'],
oauth_client)
class AccessTokenTests(TokenTests):
def setUp(self):
super(AccessTokenTests, self).setUp()
self.manager = self.client.oauth1.access_tokens
self.model = access_tokens.AccessToken
self.path_prefix = 'OS-OAUTH1'
def test_create_access_token_expires_at(self):
verifier = uuid.uuid4().hex
consumer_key = uuid.uuid4().hex
consumer_secret = uuid.uuid4().hex
request_key = uuid.uuid4().hex
request_secret = uuid.uuid4().hex
t = self._new_oauth_token_with_expires_at()
access_key, access_secret, expires_at, resp_ref = t
headers = {'Content-Type': 'application/x-www-form-urlencoded'}
self.stub_url('POST', [self.path_prefix, 'access_token'],
status_code=201, text=resp_ref, headers=headers)
# Assert that the manager creates an access token object
access_token = self.manager.create(consumer_key, consumer_secret,
request_key, request_secret,
verifier)
self.assertIsInstance(access_token, self.model)
self.assertEqual(access_key, access_token.key)
self.assertEqual(access_secret, access_token.secret)
self.assertEqual(expires_at, access_token.expires)
req_headers = self.requests_mock.last_request.headers
oauth_client = oauth1.Client(consumer_key,
client_secret=consumer_secret,
resource_owner_key=request_key,
resource_owner_secret=request_secret,
signature_method=oauth1.SIGNATURE_HMAC,
verifier=verifier)
self._validate_oauth_headers(req_headers['Authorization'],
oauth_client)
class AuthenticateWithOAuthTests(TokenTests):
def setUp(self):
super(AuthenticateWithOAuthTests, self).setUp()
if oauth1 is None:
self.skipTest('optional package oauthlib is not installed')
def test_oauth_authenticate_success(self):
consumer_key = uuid.uuid4().hex
consumer_secret = uuid.uuid4().hex
access_key = uuid.uuid4().hex
access_secret = uuid.uuid4().hex
# Just use an existing project scoped token and change
# the methods to oauth1, and add an OS-OAUTH1 section.
oauth_token = client_fixtures.project_scoped_token()
oauth_token['methods'] = ["oauth1"]
oauth_token['OS-OAUTH1'] = {"consumer_id": consumer_key,
"access_token_id": access_key}
self.stub_auth(json=oauth_token)
a = auth.OAuth(self.TEST_URL, consumer_key=consumer_key,
consumer_secret=consumer_secret,
access_key=access_key,
access_secret=access_secret)
s = session.Session(auth=a)
t = s.get_token()
self.assertEqual(self.TEST_TOKEN, t)
OAUTH_REQUEST_BODY = {
"auth": {
"identity": {
"methods": ["oauth1"],
"oauth1": {}
}
}
}
self.assertRequestBodyIs(json=OAUTH_REQUEST_BODY)
# Assert that the headers have the same oauthlib data
req_headers = self.requests_mock.last_request.headers
oauth_client = oauth1.Client(consumer_key,
client_secret=consumer_secret,
resource_owner_key=access_key,
resource_owner_secret=access_secret,
signature_method=oauth1.SIGNATURE_HMAC)
self._validate_oauth_headers(req_headers['Authorization'],
oauth_client)
class TestOAuthLibModule(utils.TestCase):
def test_no_oauthlib_installed(self):
with mock.patch.object(auth, 'oauth1', None):
self.assertRaises(NotImplementedError,
auth.OAuth,
self.TEST_URL,
consumer_key=uuid.uuid4().hex,
consumer_secret=uuid.uuid4().hex,
access_key=uuid.uuid4().hex,
access_secret=uuid.uuid4().hex)
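# Example (a minimal sketch of the flow AuthenticateWithOAuthTests
# exercises above; the endpoint and credentials are placeholder
# assumptions):
#
#   from keystoneclient import session
#   from keystoneclient.v3.contrib.oauth1 import auth
#
#   a = auth.OAuth('http://keystone.example.com:5000/v3',
#                  consumer_key='<consumer-key>',
#                  consumer_secret='<consumer-secret>',
#                  access_key='<access-key>',
#                  access_secret='<access-secret>')
#   s = session.Session(auth=a)
#   token = s.get_token()  # scoped token obtained via the oauth1 method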
|
{
"content_hash": "ad541dad1df2ee1112041f35a96e65ee",
"timestamp": "",
"source": "github",
"line_count": 273,
"max_line_length": 76,
"avg_line_length": 39.84249084249084,
"alnum_prop": 0.5853636112898777,
"repo_name": "darren-wang/ksc",
"id": "092e6777c5fc654070ff265b714db8ac8a62ac99",
"size": "11423",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "keystoneclient/tests/unit/v3/test_oauth1.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "1405295"
},
{
"name": "Shell",
"bytes": "7107"
}
],
"symlink_target": ""
}
|
import cv2
import numpy as np
import os
from plantcv.plantcv._debug import _debug
from plantcv.plantcv import fatal_error
from plantcv.plantcv import params
def hist_equalization(gray_img):
"""
    Histogram equalization is a method to normalize the distribution of intensity values. If the image has low
    contrast, equalization will make it easier to threshold.
Inputs:
gray_img = Grayscale image data
Returns:
img_eh = normalized image
:param gray_img: numpy.ndarray
:return img_eh: numpy.ndarray
"""
if len(np.shape(gray_img)) == 3:
fatal_error("Input image must be gray")
img_eh = cv2.equalizeHist(gray_img)
_debug(visual=img_eh,
filename=os.path.join(params.debug_outdir, str(params.device) + '_hist_equal_img.png'),
cmap='gray')
return img_eh
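# Example (a minimal sketch, assuming a grayscale image on disk named
# 'leaf.png'; the file names are placeholders):
#
#   gray = cv2.imread('leaf.png', cv2.IMREAD_GRAYSCALE)
#   equalized = hist_equalization(gray)
#   cv2.imwrite('leaf_equalized.png', equalized)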
|
{
"content_hash": "e9b16a09ad0b6ae7db6473fcbe87c452",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 110,
"avg_line_length": 25.272727272727273,
"alnum_prop": 0.6762589928057554,
"repo_name": "stiphyMT/plantcv",
"id": "77b2519c371a7345fb7ce360d47ce74926ba811a",
"size": "860",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "plantcv/plantcv/hist_equalization.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "1114"
},
{
"name": "Python",
"bytes": "955647"
},
{
"name": "R",
"bytes": "1327"
},
{
"name": "Shell",
"bytes": "3348"
}
],
"symlink_target": ""
}
|
def test_response_headers(self):
response = self.client.{{ request.method|lower }}('{{ request.path }}', data={{ request.data|safe }})
{% for k, v in response.headers %}self.assertEqual(response['{{ k }}'], '{{ v }}')
{% endfor %}
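{# Example (a hedged sketch): for a GET to '/about/' whose response     #}
{# carried a 'Content-Type' header, the template above would render     #}
{# roughly as:                                                          #}
{#                                                                      #}
{#     def test_response_headers(self):                                 #}
{#         response = self.client.get('/about/', data={})               #}
{#         self.assertEqual(response['Content-Type'], 'text/html')      #}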
|
{
"content_hash": "d0522f7c052491e855d23799580ffa76",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 109,
"avg_line_length": 64.75,
"alnum_prop": 0.5598455598455598,
"repo_name": "kezabelle/django-testguess",
"id": "f4e245323aa466437b2b4b099f41d1a2bbb6b68f",
"size": "259",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "testguess/templates/testguess/headers.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "HTML",
"bytes": "1695"
},
{
"name": "Python",
"bytes": "39285"
}
],
"symlink_target": ""
}
|
from sphinx import addnodes
import docutils.parsers.rst.directives
from .base import BaseDirective
class AutoFunctionDirective(BaseDirective):
"""Directive to render :term:`Javascript` function documentation.
The unique argument should be the identifier of the function element.
.. sourcecode:: rest
.. js:autofunction:: module.doSomething
The available options are:
* alias:
String element to replace the function name.
* module-alias:
String element to replace the module name.
* module-path-alias:
String element to replace the module path.
* force-partial-import:
Indicate whether the function import statement display should be
indicated with partial import if the function element is exported.
.. seealso::
:ref:`directive/autofunction`
"""
#: Javascript function is callable
has_arguments = True
#: Define the Object type
objtype = "function"
#: function options
option_spec = {
"alias": docutils.parsers.rst.directives.unchanged_required,
"module-alias": docutils.parsers.rst.directives.unchanged_required,
"module-path-alias": docutils.parsers.rst.directives.unchanged_required,
"force-partial-import": lambda x: True,
}
def handle_signature(self, signature, node):
"""Update the signature *node*."""
env = self.state.document.settings.env.element_environment
module_env = self.state.document.settings.env.module_environment
name = self.options.get("alias", env["name"])
module_id = env["module_id"]
module_name = self.options.get(
"module-alias", module_env[module_id]["name"]
)
node["type"] = "function"
node["id"] = env["id"]
node["module"] = module_name
node["fullname"] = name
if env["generator"]:
node += addnodes.desc_type("function* ", "function* ")
node += addnodes.desc_addname(module_name + ".", module_name + ".")
node += addnodes.desc_name(name, name)
param_list = addnodes.desc_parameterlist()
for argument in env["arguments"]:
param_list += addnodes.desc_parameter(argument, argument)
node += param_list
return name, module_name
def before_content(self):
"""Update the content.
Compute the description and import statement if available.
"""
env = self.state.document.settings.env.element_environment
module_env = self.state.document.settings.env.module_environment
if not env["anonymous"]:
self.content = self.generate_import_statement(
env, module_env, self.options.get("force-partial-import")
)
self.content += self.generate_description(env)
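# Example (a minimal sketch of how the options above might be combined in
# a reST document; the function and alias names are placeholders):
#
#   .. js:autofunction:: module.doSomething
#      :alias: doSomethingElse
#      :module-alias: my_module
#      :force-partial-import: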
|
{
"content_hash": "7b6d3dc94f354ec459ae7851d92aec4f",
"timestamp": "",
"source": "github",
"line_count": 93,
"max_line_length": 80,
"avg_line_length": 30.49462365591398,
"alnum_prop": 0.6382228490832158,
"repo_name": "buddly27/champollion",
"id": "8a1bcb9cc06224454b25d1281dcde82c1bd4a2a2",
"size": "2854",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "source/champollion/directive/js_function.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "332819"
}
],
"symlink_target": ""
}
|
from setuptools import setup, find_packages, Command
import sys
__version__ = '0.2.0'
tests_require = []
if sys.version_info < (2, 7):
tests_require += ['unittest2']
class Tox(Command):
user_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
try:
import tox
        except ImportError:
            sys.exit("tox is required to run tests. $ pip install tox")
tox.cmdline()
#
# determine requirements
#
setup(
name="zombie",
version=__version__,
include_package_data=True,
author="Ryan Petrello",
author_email="ryan [at] ryanpetrello [dot] com",
url="https://github.com/ryanpetrello/python-zombie",
description="A Python driver for Zombie.js",
long_description=open('README.rst').read(),
packages=find_packages(exclude=['ez_setup', 'tests']),
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Intended Audience :: Developers',
'Intended Audience :: Information Technology',
'Intended Audience :: System Administrators',
'License :: OSI Approved :: MIT License',
'Operating System :: MacOS :: MacOS X',
'Operating System :: Microsoft :: Windows',
'Operating System :: POSIX',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: Implementation :: PyPy',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Software Development :: Quality Assurance',
'Topic :: Software Development :: Testing',
'Topic :: Software Development :: Testing :: Traffic Generation'
],
license="MIT",
install_requires=[],
tests_require=tests_require,
cmdclass={'test': Tox}
)
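# Usage note (a sketch): the Tox command class above hooks the standard
# test entry point, so the suite is expected to run with:
#
#   $ pip install tox
#   $ python setup.py test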
|
{
"content_hash": "7b5f8403713604e0b6df4aa5db2c9c5a",
"timestamp": "",
"source": "github",
"line_count": 66,
"max_line_length": 72,
"avg_line_length": 30.984848484848484,
"alnum_prop": 0.6053789731051344,
"repo_name": "ryanpetrello/python-zombie",
"id": "8229ed72ebbf39c7e5909653202e6d3695aa9ef8",
"size": "2045",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "1162"
},
{
"name": "Python",
"bytes": "57331"
}
],
"symlink_target": ""
}
|
from .base import Image, ImageBoundaryError
from .boolean import BooleanImage
from .masked import MaskedImage
from .interpolation import scipy_interpolation
|
{
"content_hash": "7e7c1236913661af26eff5a4a8bc60ce",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 46,
"avg_line_length": 39.25,
"alnum_prop": 0.8535031847133758,
"repo_name": "jalabort/ijcv-2014-aam",
"id": "36698047387ef8e3079ce11bf697c17150aaf8ca",
"size": "157",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "aam/image/__init__.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "C++",
"bytes": "2112584"
},
{
"name": "Python",
"bytes": "320521"
}
],
"symlink_target": ""
}
|
"""The Server Group API Extension."""
from oslo_log import log as logging
import six
import webob
from webob import exc
from nova.api.openstack import common
from nova.api.openstack.compute.schemas.v3 import server_groups as schema
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova.api import validation
import nova.exception
from nova.i18n import _
from nova.i18n import _LE
from nova import objects
LOG = logging.getLogger(__name__)
ALIAS = "os-server-groups"
authorize = extensions.extension_authorizer('compute', 'v3:' + ALIAS)
def _authorize_context(req):
context = req.environ['nova.context']
authorize(context)
return context
class ServerGroupController(wsgi.Controller):
"""The Server group API controller for the OpenStack API."""
def _format_server_group(self, context, group):
        # The id field holds the uuid of the server group.
        # There is no 'uuid' key in the server_group seen by clients.
        # In addition, clients see policies as a ["policy-name"] list,
        # and they see members as a ["server-id"] list.
server_group = {}
server_group['id'] = group.uuid
server_group['name'] = group.name
server_group['policies'] = group.policies or []
# NOTE(danms): This has been exposed to the user, but never used.
# Since we can't remove it, just make sure it's always empty.
server_group['metadata'] = {}
members = []
if group.members:
# Display the instances that are not deleted.
filters = {'uuid': group.members, 'deleted': False}
instances = objects.InstanceList.get_by_filters(
context, filters=filters)
members = [instance.uuid for instance in instances]
server_group['members'] = members
return server_group
@extensions.expected_errors(404)
def show(self, req, id):
"""Return data about the given server group."""
context = _authorize_context(req)
try:
sg = objects.InstanceGroup.get_by_uuid(context, id)
except nova.exception.InstanceGroupNotFound as e:
raise webob.exc.HTTPNotFound(explanation=e.format_message())
return {'server_group': self._format_server_group(context, sg)}
@wsgi.response(204)
@extensions.expected_errors(404)
def delete(self, req, id):
"""Delete an server group."""
context = _authorize_context(req)
try:
sg = objects.InstanceGroup.get_by_uuid(context, id)
except nova.exception.InstanceGroupNotFound as e:
raise webob.exc.HTTPNotFound(explanation=e.format_message())
quotas = objects.Quotas()
project_id, user_id = objects.quotas.ids_from_server_group(context, sg)
try:
# We have to add the quota back to the user that created
# the server group
quotas.reserve(context, project_id=project_id,
user_id=user_id, server_groups=-1)
except Exception:
quotas = None
LOG.exception(_LE("Failed to update usages deallocating "
"server group"))
try:
sg.destroy()
except nova.exception.InstanceGroupNotFound as e:
if quotas:
quotas.rollback()
raise webob.exc.HTTPNotFound(explanation=e.format_message())
if quotas:
quotas.commit()
@extensions.expected_errors(())
def index(self, req):
"""Returns a list of server groups."""
context = _authorize_context(req)
project_id = context.project_id
if 'all_projects' in req.GET and context.is_admin:
sgs = objects.InstanceGroupList.get_all(context)
else:
sgs = objects.InstanceGroupList.get_by_project_id(
context, project_id)
limited_list = common.limited(sgs.objects, req)
result = [self._format_server_group(context, group)
for group in limited_list]
return {'server_groups': result}
@extensions.expected_errors((400, 403))
@validation.schema(schema.create)
def create(self, req, body):
"""Creates a new server group."""
context = _authorize_context(req)
quotas = objects.Quotas()
try:
quotas.reserve(context, project_id=context.project_id,
user_id=context.user_id, server_groups=1)
except nova.exception.OverQuota:
msg = _("Quota exceeded, too many server groups.")
raise exc.HTTPForbidden(explanation=msg)
vals = body['server_group']
sg = objects.InstanceGroup(context)
sg.project_id = context.project_id
sg.user_id = context.user_id
try:
sg.name = vals.get('name')
sg.policies = vals.get('policies')
sg.create()
except ValueError as e:
quotas.rollback()
            raise exc.HTTPBadRequest(explanation=six.text_type(e))
quotas.commit()
return {'server_group': self._format_server_group(context, sg)}
class ServerGroups(extensions.V3APIExtensionBase):
"""Server group support."""
name = "ServerGroups"
alias = ALIAS
version = 1
def get_resources(self):
res = extensions.ResourceExtension(
ALIAS, controller=ServerGroupController(),
member_actions={"action": "POST", })
return [res]
def get_controller_extensions(self):
return []
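# Example (a hedged sketch of the wire format implied by the schema and
# _format_server_group above; values are placeholders):
#
#   POST /v3/os-server-groups
#   {"server_group": {"name": "my-group", "policies": ["anti-affinity"]}}
#
#   -> {"server_group": {"id": "<uuid>", "name": "my-group",
#                        "policies": ["anti-affinity"], "members": [],
#                        "metadata": {}}}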
|
{
"content_hash": "cffb9aecc41867479fe0624aeb5e20c2",
"timestamp": "",
"source": "github",
"line_count": 157,
"max_line_length": 79,
"avg_line_length": 35.452229299363054,
"alnum_prop": 0.6151634926338484,
"repo_name": "cloudbase/nova-virtualbox",
"id": "58c55df42b4adc9b073aec34d08012c586e981f0",
"size": "6205",
"binary": false,
"copies": "2",
"ref": "refs/heads/virtualbox_driver",
"path": "nova/api/openstack/compute/plugins/v3/server_groups.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "16016453"
},
{
"name": "Shell",
"bytes": "20716"
},
{
"name": "Smarty",
"bytes": "497954"
}
],
"symlink_target": ""
}
|
import os
import time
from ethereum import utils
from ethereum import pruning_trie as trie
from ethereum.refcount_db import RefcountDB
from ethereum.db import OverlayDB
from ethereum.utils import to_string, is_string
import rlp
from rlp.utils import encode_hex
from ethereum import blocks
from ethereum import processblock
from ethereum.slogging import get_logger
from ethereum.config import Env
import sys
log = get_logger('eth.chain')
class Index(object):
""""
Collection of indexes
children:
- needed to get the uncles of a block
blocknumbers:
- needed to mark the longest chain (path to top)
transactions:
- optional to resolve txhash to block:tx
"""
def __init__(self, env, index_transactions=True):
assert isinstance(env, Env)
self.env = env
self.db = env.db
self._index_transactions = index_transactions
def add_block(self, blk):
self.add_child(blk.prevhash, blk.hash)
if self._index_transactions:
self._add_transactions(blk)
# block by number #########
def _block_by_number_key(self, number):
return 'blocknumber:%d' % number
def update_blocknumbers(self, blk):
"start from head and update until the existing indices match the block"
while True:
if blk.number > 0:
self.db.put_temporarily(self._block_by_number_key(blk.number), blk.hash)
else:
self.db.put(self._block_by_number_key(blk.number), blk.hash)
self.db.commit_refcount_changes(blk.number)
if blk.number == 0:
break
blk = blk.get_parent()
if self.has_block_by_number(blk.number) and \
self.get_block_by_number(blk.number) == blk.hash:
break
def has_block_by_number(self, number):
return self._block_by_number_key(number) in self.db
def get_block_by_number(self, number):
"returns block hash"
return self.db.get(self._block_by_number_key(number))
# transactions #############
def _add_transactions(self, blk):
"'tx_hash' -> 'rlp([blockhash,tx_number])"
for i, tx in enumerate(blk.get_transactions()):
self.db.put_temporarily(tx.hash, rlp.encode([blk.hash, i]))
self.db.commit_refcount_changes(blk.number)
def get_transaction(self, txhash):
"return (tx, block, index)"
blockhash, tx_num_enc = rlp.decode(self.db.get(txhash))
blk = rlp.decode(self.db.get(blockhash), blocks.Block, env=self.env)
num = utils.decode_int(tx_num_enc)
tx_data = blk.get_transaction(num)
return tx_data, blk, num
# children ##############
def _child_db_key(self, blk_hash):
return b'ci:' + blk_hash
def add_child(self, parent_hash, child_hash):
# only efficient for few children per block
children = list(set(self.get_children(parent_hash) + [child_hash]))
assert children.count(child_hash) == 1
self.db.put_temporarily(self._child_db_key(parent_hash), rlp.encode(children))
def get_children(self, blk_hash):
"returns block hashes"
key = self._child_db_key(blk_hash)
if key in self.db:
return rlp.decode(self.db.get(key))
return []
class Chain(object):
"""
Manages the chain and requests to it.
    :ivar head_candidate: the block which, if mined by our miner, would
        become the new head
"""
head_candidate = None
def __init__(self, env, genesis=None, new_head_cb=None, coinbase='\x00' * 20):
assert isinstance(env, Env)
self.env = env
self.db = self.blockchain = env.db
self.new_head_cb = new_head_cb
self.index = Index(self.env)
self._coinbase = coinbase
if 'HEAD' not in self.db:
self._initialize_blockchain(genesis)
log.debug('chain @', head_hash=self.head)
self.genesis = self.get(self.index.get_block_by_number(0))
log.debug('got genesis', nonce=self.genesis.nonce.encode('hex'),
difficulty=self.genesis.difficulty)
self._update_head_candidate()
def _initialize_blockchain(self, genesis=None):
log.info('Initializing new chain')
if not genesis:
genesis = blocks.genesis(self.env)
log.info('new genesis', genesis_hash=genesis, difficulty=genesis.difficulty)
self.index.add_block(genesis)
self._store_block(genesis)
assert genesis == blocks.get_block(self.env, genesis.hash)
self._update_head(genesis)
assert genesis.hash in self
self.commit()
@property
def coinbase(self):
assert self.head_candidate.coinbase == self._coinbase
return self._coinbase
@coinbase.setter
def coinbase(self, value):
self._coinbase = value
# block reward goes to different address => redo finalization of head candidate
self._update_head(self.head)
@property
def head(self):
if self.blockchain is None or 'HEAD' not in self.blockchain:
self._initialize_blockchain()
ptr = self.blockchain.get('HEAD')
return blocks.get_block(self.env, ptr)
def _update_head(self, block, forward_pending_transactions=True):
log.debug('updating head')
if not block.is_genesis():
#assert self.head.chain_difficulty() < block.chain_difficulty()
if block.get_parent() != self.head:
log.debug('New Head is on a different branch',
head_hash=block, old_head_hash=self.head)
# Some temporary auditing to make sure pruning is working well
if block.number > 0 and block.number % 500 == 0 and isinstance(self.db, RefcountDB):
trie.proof.push(trie.RECORDING)
block.to_dict(with_state=True)
n = trie.proof.get_nodelist()
trie.proof.pop()
sys.stderr.write('State size: %d\n' % sum([(len(rlp.encode(a)) + 32) for a in n]))
# Fork detected, revert death row and change logs
if block.number > 0:
b = block.get_parent()
h = self.head
b_children = []
if b.hash != h.hash:
log.warn('reverting')
while h.number > b.number:
h.state.db.revert_refcount_changes(h.number)
h = h.get_parent()
while b.number > h.number:
b_children.append(b)
b = b.get_parent()
while b.hash != h.hash:
h.state.db.revert_refcount_changes(h.number)
h = h.get_parent()
b_children.append(b)
b = b.get_parent()
for bc in b_children:
processblock.verify(bc, bc.get_parent())
self.blockchain.put('HEAD', block.hash)
assert self.blockchain.get('HEAD') == block.hash
sys.stderr.write('New head: %s %d\n' % (utils.encode_hex(block.hash), block.number))
self.index.update_blocknumbers(self.head)
self._update_head_candidate(forward_pending_transactions)
if self.new_head_cb and not block.is_genesis():
self.new_head_cb(block)
def _update_head_candidate(self, forward_pending_transactions=True):
"after new head is set"
log.debug('updating head candidate')
# collect uncles
blk = self.head # parent of the block we are collecting uncles for
uncles = set(u.header for u in self.get_brothers(blk))
for i in range(self.env.config['MAX_UNCLE_DEPTH'] + 2):
for u in blk.uncles:
assert isinstance(u, blocks.BlockHeader)
uncles.discard(u)
if blk.has_parent():
blk = blk.get_parent()
assert not uncles or max(u.number for u in uncles) <= self.head.number
uncles = list(uncles)[:self.env.config['MAX_UNCLES']]
# create block
ts = max(int(time.time()), self.head.timestamp + 1)
_env = Env(OverlayDB(self.head.db), self.env.config, self.env.global_config)
head_candidate = blocks.Block.init_from_parent(self.head, coinbase=self._coinbase,
timestamp=ts, uncles=uncles, env=_env)
assert head_candidate.validate_uncles()
self.pre_finalize_state_root = head_candidate.state_root
head_candidate.finalize()
# add transactions from previous head candidate
old_head_candidate = self.head_candidate
self.head_candidate = head_candidate
if old_head_candidate is not None and forward_pending_transactions:
log.debug('forwarding pending transactions')
for tx in old_head_candidate.get_transactions():
self.add_transaction(tx)
else:
log.debug('discarding pending transactions')
def get_uncles(self, block):
"""Return the uncles of `block`."""
if not block.has_parent():
return []
else:
return self.get_brothers(block.get_parent())
def get_brothers(self, block):
"""Return the uncles of the hypothetical child of `block`."""
o = []
i = 0
while block.has_parent() and i < self.env.config['MAX_UNCLE_DEPTH']:
parent = block.get_parent()
o.extend([u for u in self.get_children(parent) if u != block])
block = block.get_parent()
i += 1
return o
def get(self, blockhash):
assert is_string(blockhash)
assert len(blockhash) == 32
return blocks.get_block(self.env, blockhash)
def has_block(self, blockhash):
assert is_string(blockhash)
assert len(blockhash) == 32
return blockhash in self.blockchain
def __contains__(self, blockhash):
return self.has_block(blockhash)
def _store_block(self, block):
if block.number > 0:
self.blockchain.put_temporarily(block.hash, rlp.encode(block))
else:
self.blockchain.put(block.hash, rlp.encode(block))
def commit(self):
self.blockchain.commit()
def add_block(self, block, forward_pending_transactions=True):
"returns True if block was added sucessfully"
_log = log.bind(block_hash=block)
# make sure we know the parent
if not block.has_parent() and not block.is_genesis():
_log.debug('missing parent')
return False
if not block.validate_uncles():
_log.debug('invalid uncles')
return False
if not len(block.nonce) == 8:
_log.debug('nonce not set')
return False
elif not block.header.check_pow(nonce=block.nonce) and\
not block.is_genesis():
_log.debug('invalid nonce')
return False
if block.has_parent():
try:
processblock.verify(block, block.get_parent())
except processblock.VerificationFailed as e:
_log.critical('VERIFICATION FAILED', error=e)
f = os.path.join(utils.data_dir, 'badblock.log')
open(f, 'w').write(to_string(block.hex_serialize()))
return False
if block.number < self.head.number:
_log.debug("older than head", head_hash=self.head)
# Q: Should we have any limitations on adding blocks?
self.index.add_block(block)
self._store_block(block)
# set to head if this makes the longest chain w/ most work for that number
if block.chain_difficulty() > self.head.chain_difficulty():
_log.debug('new head')
self._update_head(block, forward_pending_transactions)
elif block.number > self.head.number:
_log.warn('has higher blk number than head but lower chain_difficulty',
head_hash=self.head, block_difficulty=block.chain_difficulty(),
head_difficulty=self.head.chain_difficulty())
block.transactions.clear_all()
block.receipts.clear_all()
block.state.db.commit_refcount_changes(block.number)
block.state.db.cleanup(block.number)
self.commit() # batch commits all changes that came with the new block
return True
def get_children(self, block):
return [self.get(c) for c in self.index.get_children(block.hash)]
def add_transaction(self, transaction):
"""Add a transaction to the :attr:`head_candidate` block.
If the transaction is invalid, the block will not be changed.
        :returns: `True` if the transaction was successfully added or `False`
if the transaction was invalid
"""
assert self.head_candidate is not None
head_candidate = self.head_candidate
log.debug('add tx', num_txs=self.num_transactions(), tx=transaction, on=head_candidate)
if self.head_candidate.includes_transaction(transaction.hash):
log.debug('known tx')
return
old_state_root = head_candidate.state_root
# revert finalization
head_candidate.state_root = self.pre_finalize_state_root
try:
success, output = processblock.apply_transaction(head_candidate, transaction)
except processblock.InvalidTransaction as e:
            # if unsuccessful the prerequisites were not fulfilled
# and the tx is invalid, state must not have changed
log.debug('invalid tx', error=e)
head_candidate.state_root = old_state_root # reset
return False
log.debug('valid tx')
# we might have a new head_candidate (due to ctx switches in pyethapp)
if self.head_candidate != head_candidate:
log.debug('head_candidate changed during validation, trying again')
self.add_transaction(transaction)
return
self.pre_finalize_state_root = head_candidate.state_root
head_candidate.finalize()
log.debug('tx applied', result=output)
assert old_state_root != head_candidate.state_root
return True
def get_transactions(self):
"""Get a list of new transactions not yet included in a mined block
but known to the chain.
"""
if self.head_candidate:
log.debug('get_transactions called', on=self.head_candidate)
return self.head_candidate.get_transactions()
else:
return []
def num_transactions(self):
if self.head_candidate:
return self.head_candidate.transaction_count
else:
return 0
def get_chain(self, start='', count=10):
"return 'count' blocks starting from head or start"
log.debug("get_chain", start=encode_hex(start), count=count)
blocks = []
block = self.head
if start:
if start not in self.index.db:
return []
block = self.get(start)
if not self.in_main_branch(block):
return []
for i in range(count):
blocks.append(block)
if block.is_genesis():
break
block = block.get_parent()
return blocks
def in_main_branch(self, block):
try:
return block.hash == self.index.get_block_by_number(block.number)
except KeyError:
return False
def get_descendants(self, block, count=1):
log.debug("get_descendants", block_hash=block)
assert block.hash in self
block_numbers = list(range(block.number + 1, min(self.head.number + 1,
block.number + count + 1)))
return [self.get(self.index.get_block_by_number(n)) for n in block_numbers]
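# Example (a minimal sketch of the Chain lifecycle; `tx` and `mined_block`
# are assumed to come from a wallet and a miner respectively, and EphemDB
# is used only to keep the sketch self-contained):
#
#   from ethereum.db import EphemDB
#
#   chain = Chain(Env(EphemDB()))        # fresh chain from a new genesis
#   chain.add_transaction(tx)            # queue tx on the head candidate
#   if chain.add_block(mined_block):     # extend the chain, maybe re-head
#       print(chain.head.number)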
|
{
"content_hash": "6b18bb6b7d1888dab4875e5490e833a1",
"timestamp": "",
"source": "github",
"line_count": 414,
"max_line_length": 95,
"avg_line_length": 38.572463768115945,
"alnum_prop": 0.5938380612436596,
"repo_name": "vaporry/pyethereum",
"id": "99a6f3c63db4aadb9b1665699c9c36fbf9c755a0",
"size": "15969",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "ethereum/chain.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "2045"
},
{
"name": "Python",
"bytes": "454620"
}
],
"symlink_target": ""
}
|