text
stringlengths 4
1.02M
| meta
dict |
|---|---|
from django.contrib import admin
from django.utils.translation import ugettext as _
from django_attach.forms import AttachmentInline
from linguo.forms import MultilingualModelForm
from main.models import *
class CategoryAdmin(admin.ModelAdmin):
    """Admin options for Category: list title/path and allow file attachments."""
    inlines = (AttachmentInline,)
    list_display = ('title', 'path')
class PageAdminForm(MultilingualModelForm):
    """Multilingual form used by PageAdmin, exposing every Page field."""

    class Meta:
        # linguo's MultilingualModelForm handles the per-language variants.
        fields = '__all__'
        model = Page
class PageAdmin(admin.ModelAdmin):
    """Admin options for Page: multilingual form, attachment inline, and a
    two-section field layout (main fields plus an "Advanced options" group)."""
    form = PageAdminForm
    inlines = (AttachmentInline,)
    list_display = ('title', 'category', 'path')
    list_filter = ('category',)
    search_fields = ('name', 'title')

    # Base fields are listed next to their English translations (*_en);
    # the style field lives in its own translated "Advanced options" section.
    fieldsets = (
        (None, {
            'fields': ('title', 'title_en', 'name', 'name_en', 'category',
                       'redirect', 'markup', 'content', 'content_en'),
        }),
        (_('Advanced options'), {'fields': ('style',)}),
    )
# Register both models with their customized admin classes.
admin.site.register(Category, CategoryAdmin)
admin.site.register(Page, PageAdmin)
|
{
"content_hash": "d071d71d06b048ee75e606cc55586d26",
"timestamp": "",
"source": "github",
"line_count": 46,
"max_line_length": 50,
"avg_line_length": 20.543478260869566,
"alnum_prop": 0.6624338624338625,
"repo_name": "peterkuma/tjrapid",
"id": "6c7800d560def6e439f3b0d4fbb4ffe8cfa47f89",
"size": "1009",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "main/admin.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "35400"
},
{
"name": "HTML",
"bytes": "43945"
},
{
"name": "JavaScript",
"bytes": "257779"
},
{
"name": "Python",
"bytes": "82948"
},
{
"name": "XSLT",
"bytes": "2142"
}
],
"symlink_target": ""
}
|
from django.contrib import admin
from blog.models import *
class ArticleAdmin(admin.ModelAdmin):
    """Admin options for blog articles.

    Shows author, title and post date in the changelist and groups the
    editable fields into a single unnamed fieldset.
    """
    list_display = ('author', 'title', 'date_post')
    # Bug fix: the fieldset name was the *string* 'None', which made the admin
    # render a literal "None" heading. Django's convention is the value None
    # for an unnamed fieldset.
    fieldsets = (
        (None, {'fields': ('author', 'title', 'content')}),
    )
# Article uses the customized admin above; BlogComment uses the default admin.
admin.site.register(Article, ArticleAdmin)
admin.site.register(BlogComment)
|
{
"content_hash": "dc03f5bc22469be16a22669d6a94fcef",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 54,
"avg_line_length": 24.714285714285715,
"alnum_prop": 0.6098265895953757,
"repo_name": "piratos/ctfbulletin",
"id": "3aa1b4248fa6d8de708205cf522d6413c07846c0",
"size": "346",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "blog/admin.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2231"
},
{
"name": "JavaScript",
"bytes": "22972"
},
{
"name": "Python",
"bytes": "33522"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals, division, absolute_import
from builtins import * # noqa pylint: disable=unused-import, redefined-builtin
import os
import shutil
import logging
import time
from flexget import plugin
from flexget.event import event
from flexget.config_schema import one_or_more
from flexget.utils.template import RenderError
from flexget.utils.pathscrub import pathscrub
def get_directory_size(directory):
    """Return the total size of all files under *directory*.

    :param directory: Path
    :return: Size in bytes (recursively)
    """
    # Walk the whole tree and accumulate each regular file's size.
    return sum(
        os.path.getsize(os.path.join(root, name))
        for root, _, names in os.walk(directory)
        for name in names
    )
def get_siblings(ext, main_file_no_ext, main_file_ext, abs_path):
    """Find files in *abs_path* that share the main file's stem and end in *ext*.

    Returns a dict mapping each sibling's full path to the trailing part of
    its name (everything after the main file's stem). That suffix — rather
    than a plain splitext extension — is kept so language-coded names such as
    ``<stem>.en.srt`` can be reproduced when the sibling is renamed later.
    """
    siblings = {}
    stem_lower = main_file_no_ext.lower()
    suffix_lower = ext.lower()
    main_name = main_file_no_ext + main_file_ext
    for entry in os.listdir(abs_path):
        # never treat the main file as its own sibling
        if entry == main_name:
            continue
        lowered = entry.lower()
        if not (lowered.startswith(stem_lower) and lowered.endswith(suffix_lower)):
            continue
        candidate = os.path.join(abs_path, entry)
        if os.path.exists(candidate):
            siblings[candidate] = entry[len(main_file_no_ext):]
    return siblings
class BaseFileOps(object):
    """Base class for the delete/copy/move output plugins.

    Subclasses set ``log`` and implement :meth:`handle_entry`; this class
    normalizes the plugin configuration, collects sibling files (the
    ``along`` option) and drives the per-entry processing loop.
    """

    # Defined by subclasses
    log = None
    # Schema fragment for the 'along' option: sibling files that share the
    # main file's stem and carry one of these extensions (optionally searched
    # in the listed subdirectories too) are processed with the main file.
    along = {
        'type': 'object',
        'properties': {
            'extensions': one_or_more({'type': 'string'}),
            'subdirs': one_or_more({'type': 'string'})
        },
        'additionalProperties': False,
        'required': ['extensions']
    }

    def prepare_config(self, config):
        """Normalize config: True -> {}, False -> None (disabled), and coerce
        scalar 'along' values into single-item lists."""
        if config is True:
            return {}
        elif config is False:
            return
        if 'along' not in config:
            return config
        extensions = config['along'].get('extensions')
        subdirs = config['along'].get('subdirs')
        # one_or_more accepts a bare string; downstream code expects lists
        if extensions and not isinstance(extensions, list):
            config['along']['extensions'] = [extensions]
        if subdirs and not isinstance(subdirs, list):
            config['along']['subdirs'] = [subdirs]
        return config

    def on_task_output(self, task, config):
        """For each accepted entry: validate its location, gather sibling
        files, and delegate the actual action to the subclass."""
        config = self.prepare_config(config)
        if config is None:
            # plugin explicitly disabled (config was False)
            return
        for entry in task.accepted:
            if 'location' not in entry:
                self.log.verbose('Cannot handle %s because it does not have the field location.' % entry['title'])
                continue
            src = entry['location']
            src_isdir = os.path.isdir(src)
            try:
                # check location
                if not os.path.exists(src):
                    raise plugin.PluginWarning('location `%s` does not exists (anymore).' % src)
                if src_isdir:
                    if not config.get('allow_dir'):
                        raise plugin.PluginWarning('location `%s` is a directory.' % src)
                elif not os.path.isfile(src):
                    raise plugin.PluginWarning('location `%s` is not a file.' % src)
                # search for namesakes
                siblings = {}  # dict of (path=ext) pairs
                if not src_isdir and 'along' in config:
                    parent = os.path.dirname(src)
                    filename_no_ext, filename_ext = os.path.splitext(os.path.basename(src))
                    # siblings in the same directory as the main file
                    for ext in config['along']['extensions']:
                        siblings.update(get_siblings(ext, filename_no_ext, filename_ext, parent))
                    # then in any configured subdirectories (matched
                    # case-insensitively against the directory listing)
                    files = os.listdir(parent)
                    files_lower = list(map(str.lower, files))
                    for subdir in config['along'].get('subdirs', []):
                        try:
                            idx = files_lower.index(subdir)
                        except ValueError:
                            continue
                        subdir_path = os.path.join(parent, files[idx])
                        if not os.path.isdir(subdir_path):
                            continue
                        for ext in config['along']['extensions']:
                            siblings.update(get_siblings(ext, filename_no_ext, filename_ext, subdir_path))
                # execute action in subclasses
                self.handle_entry(task, config, entry, siblings)
            except OSError as err:
                entry.fail(str(err))
                continue

    def clean_source(self, task, config, entry):
        """Delete the source directory after a successful action when its
        remaining size is below the ``clean_source`` threshold (in MiB)."""
        min_size = entry.get('clean_source', config.get('clean_source', -1))
        if min_size < 0:
            # feature disabled (no clean_source configured)
            return
        base_path = os.path.split(entry.get('old_location', entry['location']))[0]
        # everything here happens after a successful execution of the main action: the entry has been moved in a
        # different location, or it does not exists anymore. so from here we can just log warnings and move on.
        if not os.path.isdir(base_path):
            self.log.warning('Cannot delete path `%s` because it does not exists (anymore).' % base_path)
            return
        dir_size = get_directory_size(base_path) / 1024 / 1024
        if dir_size >= min_size:
            self.log.info('Path `%s` left because it exceeds safety value set in clean_source option.' % base_path)
            return
        if task.options.test:
            self.log.info('Would delete `%s` and everything under it.' % base_path)
            return
        try:
            shutil.rmtree(base_path)
            self.log.info('Path `%s` has been deleted because was less than clean_source safe value.' % base_path)
        except Exception as err:
            self.log.warning('Unable to delete path `%s`: %s' % (base_path, err))

    def handle_entry(self, task, config, entry, siblings):
        # Subclasses perform the actual delete/copy/move here.
        raise NotImplementedError()
class DeleteFiles(BaseFileOps):
    """Delete all accepted files."""
    # Config is either a bare boolean or an options object.
    schema = {
        'oneOf': [
            {'type': 'boolean'},
            {
                'type': 'object',
                'properties': {
                    'allow_dir': {'type': 'boolean'},
                    'along': BaseFileOps.along,
                    'clean_source': {'type': 'number'}
                },
                'additionalProperties': False
            }
        ]
    }
    log = logging.getLogger('delete')

    def handle_entry(self, task, config, entry, siblings):
        """Delete the entry's location (file or directory) plus any sibling
        files; in test mode only log what would happen."""
        src = entry['location']
        src_isdir = os.path.isdir(src)
        if task.options.test:
            if src_isdir:
                self.log.info('Would delete `%s` and all its content.' % src)
            else:
                self.log.info('Would delete `%s`' % src)
            for s, _ in siblings.items():
                self.log.info('Would also delete `%s`' % s)
            return
        # IO errors will have the entry mark failed in the base class
        if src_isdir:
            shutil.rmtree(src)
            self.log.info('`%s` and all its content has been deleted.' % src)
        else:
            os.remove(src)
            self.log.info('`%s` has been deleted.' % src)
        # further errors will not have any effect (the entry does not exists anymore)
        for s, _ in siblings.items():
            try:
                os.remove(s)
                self.log.info('`%s` has been deleted as well.' % s)
            except Exception as err:
                self.log.warning(str(err))
        if not src_isdir:
            self.clean_source(task, config, entry)
class TransformingOps(BaseFileOps):
    """Shared implementation for the copy and move plugins: resolves the
    destination path/name, waits out in-progress unpacking, then moves or
    copies the main file and its siblings."""

    # Defined by subclasses
    move = None                # True -> shutil.move, False -> copy
    destination_field = None   # entry field holding a per-entry destination

    def handle_entry(self, task, config, entry, siblings):
        src = entry['location']
        src_isdir = os.path.isdir(src)
        src_path, src_name = os.path.split(src)
        # get the proper path and name in order of: entry, config, above split
        dst_path = entry.get(self.destination_field, config.get('to', src_path))
        if config.get('rename'):
            dst_name = config['rename']
        elif entry.get('filename') and entry['filename'] != src_name:
            # entry specifies different filename than what was split from the path
            # since some inputs fill in filename it must be different in order to be used
            dst_name = entry['filename']
        else:
            dst_name = src_name
        # both path and name may contain Jinja templates; render them now
        try:
            dst_path = entry.render(dst_path)
        except RenderError as err:
            raise plugin.PluginError('Path value replacement `%s` failed: %s' % (dst_path, err.args[0]))
        try:
            dst_name = entry.render(dst_name)
        except RenderError as err:
            raise plugin.PluginError('Filename value replacement `%s` failed: %s' % (dst_name, err.args[0]))
        # Clean invalid characters with pathscrub plugin
        dst_path = pathscrub(os.path.expanduser(dst_path))
        dst_name = pathscrub(dst_name, filename=True)
        # Join path and filename
        dst = os.path.join(dst_path, dst_name)
        if dst == entry['location']:
            raise plugin.PluginWarning('source and destination are the same.')
        if not os.path.exists(dst_path):
            if task.options.test:
                self.log.info('Would create `%s`' % dst_path)
            else:
                self.log.info('Creating destination directory `%s`' % dst_path)
                os.makedirs(dst_path)
        if not os.path.isdir(dst_path) and not task.options.test:
            raise plugin.PluginWarning('destination `%s` is not a directory.' % dst_path)
        # unpack_safety: wait (up to 30 minutes, polling once per second)
        # until the source file's size stops changing before touching it
        if config.get('unpack_safety', entry.get('unpack_safety', True)):
            count = 0
            while True:
                if count > 60 * 30:
                    raise plugin.PluginWarning('The task has been waiting unpacking for 30 minutes')
                size = os.path.getsize(src)
                time.sleep(1)
                new_size = os.path.getsize(src)
                if size != new_size:
                    if not count % 10:
                        self.log.verbose('File `%s` is possibly being unpacked, waiting ...' % src_name)
                else:
                    break
                count += 1
        src_file, src_ext = os.path.splitext(src)
        dst_file, dst_ext = os.path.splitext(dst)
        # Check dst contains src_ext
        if config.get('keep_extension', entry.get('keep_extension', True)):
            if not src_isdir and dst_ext != src_ext:
                self.log.verbose('Adding extension `%s` to dst `%s`' % (src_ext, dst))
                dst += src_ext
        funct_name = 'move' if self.move else 'copy'
        funct_done = 'moved' if self.move else 'copied'
        if task.options.test:
            self.log.info('Would %s `%s` to `%s`' % (funct_name, src, dst))
            for s, ext in siblings.items():
                # we cannot rely on splitext for extensions here (subtitles may have the language code)
                d = dst_file + ext
                self.log.info('Would also %s `%s` to `%s`' % (funct_name, s, d))
        else:
            # IO errors will have the entry mark failed in the base class
            if self.move:
                shutil.move(src, dst)
            elif src_isdir:
                shutil.copytree(src, dst)
            else:
                shutil.copy(src, dst)
            self.log.info('`%s` has been %s to `%s`' % (src, funct_done, dst))
            # further errors will not have any effect (the entry has been successfully moved or copied out)
            for s, ext in siblings.items():
                # we cannot rely on splitext for extensions here (subtitles may have the language code)
                d = dst_file + ext
                try:
                    if self.move:
                        shutil.move(s, d)
                    else:
                        shutil.copy(s, d)
                    self.log.info('`%s` has been %s to `%s` as well.' % (s, funct_done, d))
                except Exception as err:
                    self.log.warning(str(err))
        # record where the entry used to be so clean_source can find it
        entry['old_location'] = entry['location']
        entry['location'] = dst
        if self.move and not src_isdir:
            self.clean_source(task, config, entry)
class CopyFiles(TransformingOps):
    """Copy all accepted files."""

    move = False
    destination_field = 'copy_to'
    log = logging.getLogger('copy')

    # Config is either a bare boolean or an options object; all of the actual
    # work happens in TransformingOps.handle_entry.
    schema = {
        'oneOf': [
            {'type': 'boolean'},
            {
                'type': 'object',
                'properties': {
                    'to': {'type': 'string', 'format': 'path'},
                    'rename': {'type': 'string'},
                    'allow_dir': {'type': 'boolean'},
                    'unpack_safety': {'type': 'boolean'},
                    'keep_extension': {'type': 'boolean'},
                    'along': TransformingOps.along,
                },
                'additionalProperties': False,
            },
        ],
    }
class MoveFiles(TransformingOps):
    """Move all accepted files."""

    move = True
    destination_field = 'move_to'
    log = logging.getLogger('move')

    # Same options as CopyFiles plus clean_source, which prunes the now-empty
    # source directory after a successful move.
    schema = {
        'oneOf': [
            {'type': 'boolean'},
            {
                'type': 'object',
                'properties': {
                    'to': {'type': 'string', 'format': 'path'},
                    'rename': {'type': 'string'},
                    'allow_dir': {'type': 'boolean'},
                    'unpack_safety': {'type': 'boolean'},
                    'keep_extension': {'type': 'boolean'},
                    'along': TransformingOps.along,
                    'clean_source': {'type': 'number'},
                },
                'additionalProperties': False,
            },
        ],
    }
@event('plugin.register')
def register_plugin():
    # Expose the three file operations as FlexGet output plugins (API v2).
    plugin.register(DeleteFiles, 'delete', api_ver=2)
    plugin.register(CopyFiles, 'copy', api_ver=2)
    plugin.register(MoveFiles, 'move', api_ver=2)
|
{
"content_hash": "f76b8da22e8347035bd05ae84a8b5d98",
"timestamp": "",
"source": "github",
"line_count": 368,
"max_line_length": 115,
"avg_line_length": 38.60597826086956,
"alnum_prop": 0.532976701625959,
"repo_name": "jacobmetrick/Flexget",
"id": "ba6d31e250e7944626e69769da9e58bb5b3b5698",
"size": "14207",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "flexget/plugins/output/move.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "11875"
},
{
"name": "HTML",
"bytes": "78933"
},
{
"name": "JavaScript",
"bytes": "261421"
},
{
"name": "Python",
"bytes": "3090372"
},
{
"name": "SRecode Template",
"bytes": "3"
}
],
"symlink_target": ""
}
|
import sys
import os
from StringIO import StringIO
from grammar import GrammarStore
from theme import ThemeStore
from debug_formatter import DebugFormatter
def validate_testcases(dirname):
    """Run every testcase under ``<dirname>/Testcases`` and compare the debug
    formatter output against the stored ``.xml`` check file.

    A testcase is any regular file that is not ``.DS_Store`` and does not end
    in ``.xml``; its expected output lives next to it with the same stem and
    an ``.xml`` suffix. Results are printed per testcase.
    """
    print(u'Testcase validation\n')
    gs = GrammarStore()
    print(u'\nRunning Testcases')
    testcases = os.path.join(dirname, u'Testcases')
    for name in os.listdir(testcases):
        # guard clauses: skip Finder droppings and the check files themselves
        if name == u'.DS_Store' or os.path.splitext(name)[1] == u'.xml':
            continue
        testfile = os.path.join(testcases, name)
        if not os.path.isfile(testfile):
            continue
        checkfile = os.path.splitext(testfile)[0] + u'.xml'
        if not os.path.isfile(checkfile):
            print(u'✗ failure, no check file for: %s' % name)
            continue
        with open(checkfile, u'rt') as f:
            check = f.read()
        try:
            with open(testfile, u'rt') as f:
                debug_stream = StringIO()
                fmt = DebugFormatter(debug_stream)
                gs.parse_document(testfile, f, fmt)
            if check == debug_stream.getvalue():
                print(u'✓ success: %s' % name)
            else:
                print(u'✗ failure, output has changed: %s' % name)
        except Exception:
            # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
            # still abort the run instead of being counted as test failures.
            print(u'✗ failure, exception: %s' % name)
def usage(name):
    """Print a one-line usage summary for the program invoked as *name*."""
    print(u'%s -h|test|create <testcase>|debug <testcase>' % name)
if __name__ == u'__main__':
    if len(sys.argv) > 1:
        if len(sys.argv) == 2 and sys.argv[1] == u'test':
            # run the whole testcase suite relative to this script's location
            dirname = os.path.dirname(os.path.realpath(sys.argv[0]))
            validate_testcases(dirname)
        # BUG FIX: the original condition was
        #   len(sys.argv) == 3 and sys.argv[1] == u'debug' or sys.argv[1] == u'create'
        # Because `and` binds tighter than `or`, `create` was accepted even
        # without a <testcase> argument and crashed on sys.argv[2] below.
        elif len(sys.argv) == 3 and sys.argv[1] in (u'debug', u'create'):
            testfile = sys.argv[2]
            checkfile = os.path.splitext(testfile)[0] + u'.xml'
            gs = GrammarStore()
            ts = ThemeStore()
            debug_stream = StringIO()
            fmt = DebugFormatter(debug_stream, theme=ts.themes.values()[0])
            with open(testfile, u'rt') as f:
                gs.parse_document(testfile, f, fmt)
            # validate all content against original document
            original = u''
            with open(testfile, u'rt') as f:
                original = f.read()
            if fmt.content == original:
                print(u'✓ content is identical')
            else:
                print(u'✗ content is not identical to original')
            if sys.argv[1] == u'create':
                # persist the debug output as the new check file
                with open(checkfile, u'wb') as f:
                    f.write(debug_stream.getvalue())
        else:
            usage(sys.argv[0])
    else:
        usage(sys.argv[0])
|
{
"content_hash": "01cdf26be8b0bcd0782af361550d0f7a",
"timestamp": "",
"source": "github",
"line_count": 75,
"max_line_length": 88,
"avg_line_length": 36.986666666666665,
"alnum_prop": 0.5075702956020187,
"repo_name": "mountainstorm/textfriend",
"id": "c4f31f8ae55219a75467af15b3f5984eaf6be3d2",
"size": "3918",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "grammar_test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "190356"
},
{
"name": "Python",
"bytes": "58172"
}
],
"symlink_target": ""
}
|
from game import *
from learningAgents import ReinforcementAgent
from featureExtractors import *
import random,util,math
import numpy as np
class QLearningAgent(ReinforcementAgent):
    """
    Q-Learning Agent

    Functions you should fill in:
        - computeValueFromQValues
        - computeActionFromQValues
        - getQValue
        - getAction
        - update

    Instance variables you have access to
        - self.epsilon (exploration prob)
        - self.alpha (learning rate)
        - self.discount (discount rate)

    Functions you should use
        - self.getLegalActions(state)
          which returns legal actions for a state
    """

    def __init__(self, **args):
        """Initialize the Q-table; Counter defaults unseen pairs to 0."""
        ReinforcementAgent.__init__(self, **args)
        self.QValues = util.Counter()  # maps (state, action) -> Q-value

    def getQValue(self, state, action):
        """
        Returns Q(state,action).
        Returns 0.0 for never-seen pairs (the Counter default).
        """
        return self.QValues[(state, action)]

    def computeValueFromQValues(self, state):
        """
        Returns max_action Q(state,action) over legal actions, or 0.0 when
        there are no legal actions (terminal state).

        Fixes two defects in the original: terminal states were detected by
        comparing against the magic string 'TERMINAL_STATE', and max() would
        raise on an empty legal-action list for any other action-less state.
        """
        actions = self.getLegalActions(state)
        if not actions:
            return 0.0
        return max(self.getQValue(state, action) for action in actions)

    def computeActionFromQValues(self, state):
        """
        Returns the best legal action, or None when there are no legal
        actions (terminal state).

        The original returned 0.0 at the terminal state, violating the
        documented contract that says to return None.
        """
        actions = self.getLegalActions(state)
        if not actions:
            return None
        action_values = [self.getQValue(state, action) for action in actions]
        return actions[np.argmax(action_values)]

    def getAction(self, state):
        """
        Epsilon-greedy action selection: with probability self.epsilon take a
        uniformly random legal action, otherwise the greedy one. Returns None
        when there are no legal actions (the original would crash instead).
        """
        legalActions = self.getLegalActions(state)
        if not legalActions:
            return None
        if np.random.rand() < self.epsilon:
            # random.choice works for any sequence element type;
            # np.random.choice rejects lists of tuples (non-1-D input).
            return random.choice(legalActions)
        return self.computeActionFromQValues(state)

    def update(self, state, action, nextState, reward):
        """
        Q-learning update observed on a (state, action) -> nextState, reward
        transition:  Q <- Q + alpha * (reward + discount * maxQ' - Q).

        Replaces the original bare try/except (which silently swallowed any
        error while computing maxQ') with an explicit empty-action check.
        """
        nextActions = self.getLegalActions(nextState)
        if nextActions:
            nextStateMaxQ = max(self.getQValue(nextState, a) for a in nextActions)
        else:
            nextStateMaxQ = 0.0
        current = self.getQValue(state, action)
        self.QValues[(state, action)] = current + self.alpha * (
            reward + self.discount * nextStateMaxQ - current)

    def getPolicy(self, state):
        return self.computeActionFromQValues(state)

    def getValue(self, state):
        return self.computeValueFromQValues(state)
class PacmanQAgent(QLearningAgent):
    """Exactly the same as QLearningAgent, but with different default parameters."""

    def __init__(self, epsilon=0.05, gamma=0.8, alpha=0.2, numTraining=0, **args):
        """
        These default parameters can be changed from the pacman.py command line.
        For example, to change the exploration rate, try:
            python pacman.py -p PacmanQLearningAgent -a epsilon=0.1

        alpha       - learning rate
        epsilon     - exploration rate
        gamma       - discount factor
        numTraining - number of training episodes, i.e. no learning after these many episodes
        """
        args.update(epsilon=epsilon, gamma=gamma, alpha=alpha, numTraining=numTraining)
        self.index = 0  # This is always Pacman
        QLearningAgent.__init__(self, **args)

    def getAction(self, state):
        """
        Simply calls the getAction method of QLearningAgent and then
        informs parent of action for Pacman. Do not change or remove this
        method.
        """
        chosen = QLearningAgent.getAction(self, state)
        self.doAction(state, chosen)
        return chosen
class ApproximateQAgent(PacmanQAgent):
    """
    ApproximateQLearningAgent

    You should only have to overwrite getQValue
    and update. All other QLearningAgent functions
    should work as is.
    """

    def __init__(self, extractor='IdentityExtractor', **args):
        # look the extractor class up by name and instantiate it
        self.featExtractor = util.lookup(extractor, globals())()
        PacmanQAgent.__init__(self, **args)
        self.weights = util.Counter()  # maps feature -> weight

    def getWeights(self):
        return self.weights

    def getQValue(self, state, action):
        """
        Returns Q(state,action) = w * featureVector
        where * is the dotProduct operator.
        """
        feat = self.featExtractor.getFeatures(state, action)
        total = 0.0
        for dim in feat:
            total += self.weights[dim] * feat[dim]
        return total

    def update(self, state, action, nextState, reward):
        """
        Update the weights toward the TD target:
            w_i <- w_i + alpha * difference * f_i(s, a)
        with difference = reward + discount * maxQ' - Q(s, a).

        Fixes the original's bare try/except (which hid real errors while
        computing maxQ') with an explicit empty-action check, and hoists the
        loop-invariant Q(s, a)/difference computation out of the loop.
        """
        nextActions = self.getLegalActions(nextState)
        if nextActions:
            nextStateMaxQ = max(self.getQValue(nextState, a) for a in nextActions)
        else:
            nextStateMaxQ = 0.0
        feat = self.featExtractor.getFeatures(state, action)
        difference = reward + self.discount * nextStateMaxQ - self.getQValue(state, action)
        newweights = self.weights.copy()  # synchronous update: all dims see old weights
        for dim in feat:
            newweights[dim] = self.weights[dim] + self.alpha * difference * feat[dim]
        self.weights = newweights

    def final(self, state):
        "Called at the end of each game."
        # call the super-class final method
        PacmanQAgent.final(self, state)
        # did we finish training?
        if self.episodesSoFar == self.numTraining:
            # you might want to print your weights here for debugging
            pass
|
{
"content_hash": "d1454035c5d2b8f2bea74ff461cdc1f2",
"timestamp": "",
"source": "github",
"line_count": 226,
"max_line_length": 93,
"avg_line_length": 34.610619469026545,
"alnum_prop": 0.5793914599846587,
"repo_name": "PiscesDream/Ideas",
"id": "bd78ad92e1d66375a6d64b510631fae14e687219",
"size": "8490",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "reinforcement/tutorial/qlearningAgents.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "216"
},
{
"name": "CSS",
"bytes": "19456"
},
{
"name": "HTML",
"bytes": "5157752"
},
{
"name": "JavaScript",
"bytes": "103650"
},
{
"name": "Matlab",
"bytes": "12086"
},
{
"name": "Python",
"bytes": "817578"
}
],
"symlink_target": ""
}
|
import os
import unittest
import vtk, qt, ctk, slicer
from slicer.ScriptedLoadableModule import *
from CIP.logic.SlicerUtil import SlicerUtil
from CIP.ui import PreProcessingWidget
#
# CIP_InteractiveLobeSegmentation
#
class CIP_InteractiveLobeSegmentation(ScriptedLoadableModule):
    """Module descriptor: registers the Interactive Lobe Segmentation
    metadata (title, category, contributors, help text) with Slicer."""

    def __init__(self, parent):
        ScriptedLoadableModule.__init__(self, parent)
        parent.title = "Interactive Lobe Segmentation"  # TODO make this more human readable by adding spaces
        parent.categories = SlicerUtil.CIP_ModulesCategory
        parent.dependencies = [SlicerUtil.CIP_ModuleName]
        # Typo fix in the user-visible contributors list: "Hopsital" -> "Hospital".
        parent.contributors = [
            "Pietro Nardelli (UCC/SPL) and Applied Chest Imaging Laboratory, Brigham and Women's Hospital"]
        parent.helpText = """
    Scripted loadable module for Interactive Lobe segmentation.<br>
    A quick tutorial of the module can be found <a href='https://chestimagingplatform.org/files/chestimagingplatform/files/interactivelobesegmentation_tutorial_pn.pdf'>here</a>
    """
        parent.acknowledgementText = SlicerUtil.ACIL_AcknowledgementText
        self.parent = parent
#
# CIP_InteractiveLobeSegmentationWidget
#
class CIP_InteractiveLobeSegmentationWidget(ScriptedLoadableModuleWidget):
def __init__(self, parent=None):
ScriptedLoadableModuleWidget.__init__(self, parent)
self.logic = CIP_InteractiveLobeSegmentationLogic()
self.observerTags = []
if not parent:
self.parent = slicer.qMRMLWidget()
self.parent.setLayout(qt.QVBoxLayout())
self.parent.setMRMLScene(slicer.mrmlScene)
else:
self.parent = parent
self.layout = self.parent.layout()
if not parent:
self.setup()
self.parent.show()
# def enter(self):
# """Method that is invoked when we switch to the module in slicer user interface"""
# if self.nodeObserver is None:
# self.nodeObserver = slicer.mrmlScene.AddObserver(slicer.vtkMRMLScene.NodeAddedEvent, self.onNodeAdded)
# self.checkMasterAndLabelMapNodes()
def enter(self):
if self.labelSelector.currentNode():
for color in ['Red', 'Yellow', 'Green']:
slicer.app.layoutManager().sliceWidget(color).sliceLogic().GetSliceCompositeNode().SetLabelVolumeID(self.labelSelector.currentNode().GetID())
def exit(self):
for color in ['Red', 'Yellow', 'Green']:
slicer.app.layoutManager().sliceWidget(color).sliceLogic().GetSliceCompositeNode().SetLabelVolumeID('None')
# slicer.mrmlScene.RemoveObserver(self.nodeObserver)
def setup(self):
# Instantiate and connect widgets ...
ScriptedLoadableModuleWidget.setup(self)
if SlicerUtil.IsDevelopment:
self.reloadAndTestButton.visible = False # No valid tests at the moment
#
# Parameters Area
#
parametersCollapsibleButton = ctk.ctkCollapsibleButton()
parametersCollapsibleButton.text = "Parameters"
self.layout.addWidget(parametersCollapsibleButton)
# Layout within the dummy collapsible button
parametersFormLayout = qt.QFormLayout(parametersCollapsibleButton)
parametersFormLayout.setVerticalSpacing(5)
#
# Input volume selector
#
self.CTSelector = slicer.qMRMLNodeComboBox()
self.CTSelector.nodeTypes = (("vtkMRMLScalarVolumeNode"), "")
self.CTSelector.addAttribute("vtkMRMLScalarVolumeNode", "LabelMap", 0)
self.CTSelector.selectNodeUponCreation = False
self.CTSelector.addEnabled = False
self.CTSelector.removeEnabled = False
self.CTSelector.noneEnabled = True
self.CTSelector.showHidden = False
self.CTSelector.showChildNodeTypes = False
self.CTSelector.setMRMLScene(slicer.mrmlScene)
self.CTSelector.setToolTip("Pick the CT image to work on.")
parametersFormLayout.addRow("Input CT Volume: ", self.CTSelector)
#
# First input volume selector
#
self.labelSelector = slicer.qMRMLNodeComboBox()
# self.labelSelector.nodeTypes = ( ("vtkMRMLScalarVolumeNode"), "" )
# self.labelSelector.addAttribute( "vtkMRMLScalarVolumeNode", "LabelMap", 1 )
self.labelSelector.nodeTypes = (("vtkMRMLLabelMapVolumeNode"), "")
self.labelSelector.selectNodeUponCreation = True
self.labelSelector.addEnabled = False
self.labelSelector.removeEnabled = False
self.labelSelector.noneEnabled = True
self.labelSelector.showHidden = False
self.labelSelector.showChildNodeTypes = False
self.labelSelector.setMRMLScene(slicer.mrmlScene)
self.labelSelector.setToolTip("Pick the label map to the algorithm.")
parametersFormLayout.addRow("Label Map Volume: ", self.labelSelector)
#
# output volume selector
#
self.outputSelector = slicer.qMRMLNodeComboBox()
# self.outputSelector.nodeTypes = ( ("vtkMRMLScalarVolumeNode"), "" )
# self.outputSelector.addAttribute( "vtkMRMLScalarVolumeNode", "LabelMap", 1 )
self.outputSelector.nodeTypes = (("vtkMRMLLabelMapVolumeNode"), "")
self.outputSelector.selectNodeUponCreation = True
self.outputSelector.addEnabled = True
self.outputSelector.removeEnabled = True
self.outputSelector.noneEnabled = True
self.outputSelector.showHidden = False
self.outputSelector.showChildNodeTypes = True
self.outputSelector.setMRMLScene(slicer.mrmlScene)
self.outputSelector.setToolTip("Pick the output to the algorithm.")
self.outputSelector.baseName = 'Fissures Segmentation Volume'
parametersFormLayout.addRow("Fissures Volume: ", self.outputSelector)
self.preProcessingWidget = PreProcessingWidget(self.moduleName, parentWidget=self.parent)
self.preProcessingWidget.setup()
self.preProcessingWidget.filterApplication.hide()
self.preProcessingWidget.enableFilteringFrame(True)
self.preProcessingWidget.enableFilterOptions(True)
self.preProcessingWidget.enableLMFrame(True)
self.layoutCollapsibleButton = ctk.ctkCollapsibleButton()
self.layoutCollapsibleButton.text = "Layout Selection"
self.layoutCollapsibleButton.setChecked(False)
self.layoutCollapsibleButton.setFixedSize(600, 40)
self.layout.addWidget(self.layoutCollapsibleButton, 0, 4)
self.layoutFormLayout = qt.QFormLayout(self.layoutCollapsibleButton)
"""spacer = ""
for s in range( 20 ):
spacer += " """
# self.fiducialsFormLayout.setFormAlignment(4)
self.layoutGroupBox = qt.QFrame()
self.layoutGroupBox.setLayout(qt.QVBoxLayout())
self.layoutGroupBox.setFixedHeight(86)
self.layoutFormLayout.addRow(self.layoutGroupBox)
self.buttonGroupBox = qt.QFrame()
self.buttonGroupBox.setLayout(qt.QHBoxLayout())
self.layoutGroupBox.layout().addWidget(self.buttonGroupBox)
# self.layoutFormLayout.addRow(self.buttonGroupBox)
#
# Four-Up Button
#
self.fourUpButton = qt.QPushButton()
self.fourUpButton.toolTip = "Four-up view."
self.fourUpButton.enabled = True
self.fourUpButton.setFixedSize(40, 40)
fourUpIcon = qt.QIcon(":/Icons/LayoutFourUpView.png")
self.fourUpButton.setIcon(fourUpIcon)
self.buttonGroupBox.layout().addWidget(self.fourUpButton)
#
# Red Slice Button
#
self.redViewButton = qt.QPushButton()
self.redViewButton.toolTip = "Red slice only."
self.redViewButton.enabled = True
self.redViewButton.setFixedSize(40, 40)
redIcon = qt.QIcon(":/Icons/LayoutOneUpRedSliceView.png")
self.redViewButton.setIcon(redIcon)
self.buttonGroupBox.layout().addWidget(self.redViewButton)
#
# Yellow Slice Button
#
self.yellowViewButton = qt.QPushButton()
self.yellowViewButton.toolTip = "Yellow slice only."
self.yellowViewButton.enabled = True
self.yellowViewButton.setFixedSize(40, 40)
yellowIcon = qt.QIcon(":/Icons/LayoutOneUpYellowSliceView.png")
self.yellowViewButton.setIcon(yellowIcon)
self.buttonGroupBox.layout().addWidget(self.yellowViewButton)
#
# Green Slice Button
#
self.greenViewButton = qt.QPushButton()
self.greenViewButton.toolTip = "Yellow slice only."
self.greenViewButton.enabled = True
self.greenViewButton.setFixedSize(40, 40)
greenIcon = qt.QIcon(":/Icons/LayoutOneUpGreenSliceView.png")
self.greenViewButton.setIcon(greenIcon)
self.buttonGroupBox.layout().addWidget(self.greenViewButton)
#
# Buttons labels
#
self.labelsGroupBox = qt.QFrame()
hBox = qt.QHBoxLayout()
hBox.setSpacing(10)
self.labelsGroupBox.setLayout(hBox)
self.labelsGroupBox.setFixedSize(450, 26)
self.layoutGroupBox.layout().addWidget(self.labelsGroupBox, 0, 4)
fourUpLabel = qt.QLabel(" Four-up")
# fourUpLabel.setFixedHeight(10)
self.labelsGroupBox.layout().addWidget(fourUpLabel)
redLabel = qt.QLabel(" Axial")
self.labelsGroupBox.layout().addWidget(redLabel)
yellowLabel = qt.QLabel(" Saggital")
self.labelsGroupBox.layout().addWidget(yellowLabel)
greenLabel = qt.QLabel(" Coronal")
self.labelsGroupBox.layout().addWidget(greenLabel)
# Layout connections
self.fourUpButton.connect('clicked()', self.onFourUpButton)
self.redViewButton.connect('clicked()', self.onRedViewButton)
self.yellowViewButton.connect('clicked()', self.onYellowViewButton)
self.greenViewButton.connect('clicked()', self.onGreenViewButton)
#
# Fiducials Area #
self.groupBox = qt.QFrame()
self.groupBox.setLayout(qt.QHBoxLayout())
fiducialsCollapsibleButton = ctk.ctkCollapsibleButton()
fiducialsCollapsibleButton.text = "Fiducials Selection"
self.layout.addWidget(fiducialsCollapsibleButton)
self.fiducialsFormLayout = qt.QFormLayout(fiducialsCollapsibleButton)
self.fiducialsFormLayout.setVerticalSpacing(5)
self.fiducialsFormLayout.addRow(self.groupBox)
# Add fiducial lists button
self.AddLeftListButton = qt.QPushButton("Left oblique fissure")
self.AddLeftListButton.toolTip = "Create a new fiducial list for the left lung oblique fissure."
self.AddLeftListButton.setFixedHeight(40)
self.groupBox.layout().addWidget(self.AddLeftListButton)
# Add fiducial lists button
self.AddRight1ListButton = qt.QPushButton("Right oblique fissure")
self.AddRight1ListButton.toolTip = "Create a new fiducial list for the right lung oblique fissure."
self.AddRight1ListButton.setFixedHeight(40)
self.groupBox.layout().addWidget(self.AddRight1ListButton)
# Add fiducial lists button
self.AddRight2ListButton = qt.QPushButton("Right horizontal fissure")
self.AddRight2ListButton.toolTip = "Create a new fiducial list for the right lung horizontal fissure."
self.AddRight2ListButton.setFixedHeight(40)
self.groupBox.layout().addWidget(self.AddRight2ListButton)
#
# Apply Button
#
self.applyButton = qt.QPushButton("Apply")
self.applyButton.toolTip = "Run the algorithm."
self.applyButton.enabled = False
self.applyButton.setFixedSize(150, 45)
self.layout.addWidget(self.applyButton, 0, 4)
# self.layout.setAlignment(2)
#
# Show Fiducials
#
fiducialButtonsList = []
fiducialButtonsList.append(self.AddLeftListButton)
fiducialButtonsList.append(self.AddRight1ListButton)
fiducialButtonsList.append(self.AddRight2ListButton)
self.visualizationWidget = ILSVisualizationWidget(self.logic, self.applyButton, fiducialButtonsList)
self.fiducialsFormLayout.addRow(self.visualizationWidget.widget)
# connections
self.applyButton.connect('clicked(bool)', self.onApplyButton)
self.CTSelector.connect("currentNodeChanged(vtkMRMLNode*)", self.onCTSelect)
self.labelSelector.connect("currentNodeChanged(vtkMRMLNode*)", self.onSelect)
self.outputSelector.connect("currentNodeChanged(vtkMRMLNode*)", self.onSelect)
self.AddLeftListButton.connect('clicked()', self.onAddLeftListButton)
self.AddRight1ListButton.connect('clicked()', self.onAddRight1ListButton)
self.AddRight2ListButton.connect('clicked()', self.onAddRight2ListButton)
self.updateList()
# Add vertical spacer
self.layout.addStretch(1)
def cleanup(self):
    """Called by Slicer when the module widget is torn down; nothing to release."""
    pass
def onCTSelect(self, CTNode):
    """Show the selected CT as the background volume of all three 2D slice views.

    CTNode may be None (selector cleared); the background is then unset.
    """
    if CTNode:
        for color in ['Red', 'Yellow', 'Green']:
            slicer.app.layoutManager().sliceWidget(color).sliceLogic().GetSliceCompositeNode().SetBackgroundVolumeID(CTNode.GetID())
    else:
        # No CT selected: clear the background of every slice view.
        for color in ['Red', 'Yellow', 'Green']:
            slicer.app.layoutManager().sliceWidget(color).sliceLogic().GetSliceCompositeNode().SetBackgroundVolumeID('None')
def onSelect(self):
    """React to a label-map selection change.

    When a label map is selected, the pre-processing frames are disabled
    (no need to build one) and the label map is shown in the slice views;
    when it is cleared, pre-processing is re-enabled and the label layer
    is unset.
    """
    self.layoutCollapsibleButton.setChecked(True)
    if self.labelSelector.currentNode():
        self.preProcessingWidget.enableFilteringFrame(False)
        self.preProcessingWidget.enableLMFrame(False)
        for color in ['Red', 'Yellow', 'Green']:
            slicer.app.layoutManager().sliceWidget(color).sliceLogic().GetSliceCompositeNode().SetLabelVolumeID(self.labelSelector.currentNode().GetID())
        SlicerUtil.changeLabelmapOpacity(0.5)
    else:
        self.preProcessingWidget.enableFilteringFrame(True)
        self.preProcessingWidget.enableLMFrame(True)
        for color in ['Red', 'Yellow', 'Green']:
            slicer.app.layoutManager().sliceWidget(color).sliceLogic().GetSliceCompositeNode().SetLabelVolumeID('None')
def onFourUpButton(self):
    """Switch to the four-up layout, keeping fiducial placement persistent."""
    interactionNode = slicer.app.applicationLogic().GetInteractionNode()
    interactionNode.Reset(None)
    interactionNode.SwitchToPersistentPlaceMode()
    # Layout id 3 == conventional four-up view.
    slicer.app.layoutManager().setLayout(3)
def onRedViewButton(self):
    """Switch to the single Red (axial) slice layout with persistent placement."""
    interactionNode = slicer.app.applicationLogic().GetInteractionNode()
    interactionNode.Reset(None)
    interactionNode.SwitchToPersistentPlaceMode()
    # Layout id 6 == Red slice only.
    slicer.app.layoutManager().setLayout(6)
def onYellowViewButton(self):
    """Switch to the single Yellow (sagittal) slice layout with persistent placement."""
    interactionNode = slicer.app.applicationLogic().GetInteractionNode()
    interactionNode.Reset(None)
    interactionNode.SwitchToPersistentPlaceMode()
    # Layout id 7 == Yellow slice only.
    slicer.app.layoutManager().setLayout(7)
def onGreenViewButton(self):
    """Switch to the single Green (coronal) slice layout with persistent placement."""
    interactionNode = slicer.app.applicationLogic().GetInteractionNode()
    interactionNode.Reset(None)
    interactionNode.SwitchToPersistentPlaceMode()
    # Layout id 8 == Green slice only.
    slicer.app.layoutManager().setLayout(8)
def onAddLeftListButton(self):
    """Highlight the left-oblique button and make 'LO' the active fiducial list."""
    highlighted = "background-color: rgb(255,99,71)"
    neutral = "background-color: rgb(255,255,255)"
    self.AddLeftListButton.setStyleSheet(highlighted)
    self.AddRight1ListButton.setStyleSheet(neutral)
    self.AddRight2ListButton.setStyleSheet(neutral)
    self.logic.name = 'LO'
    CIP_InteractiveLobeSegmentationLogic().createList('LO')
def onAddRight1ListButton(self):
    """Highlight the right-oblique button and make 'RO' the active fiducial list."""
    highlighted = "background-color: rgb(255,99,71)"
    neutral = "background-color: rgb(255,255,255)"
    self.AddRight1ListButton.setStyleSheet(highlighted)
    self.AddLeftListButton.setStyleSheet(neutral)
    self.AddRight2ListButton.setStyleSheet(neutral)
    self.logic.name = 'RO'
    CIP_InteractiveLobeSegmentationLogic().createList('RO')
def onAddRight2ListButton(self):
    """Highlight the right-horizontal button and make 'RH' the active fiducial list."""
    highlighted = "background-color: rgb(255,99,71)"
    neutral = "background-color: rgb(255,255,255)"
    self.AddRight2ListButton.setStyleSheet(highlighted)
    self.AddLeftListButton.setStyleSheet(neutral)
    self.AddRight1ListButton.setStyleSheet(neutral)
    self.logic.name = 'RH'
    CIP_InteractiveLobeSegmentationLogic().createList('RH')
def onApplyButton(self):
    """Run the interactive lobe segmentation.

    Takes the CT shown in the Red slice view as input, builds a partial
    lung label map first if none is selected (after asking the user),
    invokes the SegmentLungLobes CLI via the logic class, and finally
    displays the resulting lobe label map in all slice views.
    Returns False when the user cancels or a required input is missing.
    """
    # The input CT is whatever is currently in the Red view background.
    red_logic = slicer.app.layoutManager().sliceWidget("Red").sliceLogic()
    red_cn = red_logic.GetSliceCompositeNode()
    volumeID = red_cn.GetBackgroundVolumeID()
    CTNode = SlicerUtil.getNode(volumeID)
    if self.labelSelector.currentNode() == None:
        # No label map selected: ask whether to create one now.
        warning = self.preProcessingWidget.warningMessageForLM()
        if warning == 16384:  # 16384 == qt.QMessageBox.Yes
            labelNode = slicer.mrmlScene.AddNode(slicer.vtkMRMLLabelMapVolumeNode())
            labelNode.SetName(CTNode.GetName() + '_partialLungLabelMap')
            # NOTE(review): CTNode.GetName() is already called above, so this
            # guard can only trigger after an AttributeError was avoided --
            # confirm whether the check should precede the SetName call.
            if not CTNode:
                self.applyButton.enabled = True
                return False
            if self.preProcessingWidget.filterOnRadioButton.checked:
                # Filter a throw-away clone so the original CT stays intact.
                volumesLogic = slicer.modules.volumes.logic()
                clonedCTNode = volumesLogic.CloneVolume(slicer.mrmlScene, CTNode, 'Cloned Volume')
                self.filterInputCT(clonedCTNode)
                self.createLungLabelMap(clonedCTNode,labelNode)
                slicer.mrmlScene.RemoveNode(clonedCTNode)
                # Restore the original CT as background after the clone is gone.
                for color in ['Red', 'Yellow', 'Green']:
                    slicer.app.layoutManager().sliceWidget(color).sliceLogic().GetSliceCompositeNode().SetBackgroundVolumeID(CTNode.GetID())
            else:
                self.createLungLabelMap(CTNode,labelNode)
        else:
            qt.QMessageBox.warning(slicer.util.mainWindow(),
                                   "Parenchyma Analysis", "Please select a Lung Label Map.")
            self.applyButton.enabled = True
            return False
    self.visualizationWidget.updateScene()
    # Give the user feedback while the CLI runs synchronously.
    self.applyButton.text = "Segmenting Lobes..."
    self.applyButton.repaint()
    slicer.app.processEvents()
    logic = CIP_InteractiveLobeSegmentationLogic()
    self.visualizationWidget.pendingUpdate = True
    outputNode = self.outputSelector.currentNode()
    if not outputNode:
        outputNode = slicer.vtkMRMLLabelMapVolumeNode()
        slicer.mrmlScene.AddNode(outputNode)
    fissureVolume = None
    try:
        fissureVolume = logic.run(self.labelSelector.currentNode(), outputNode)
    except Exception as e:
        import traceback
        traceback.print_exc()
        qt.QMessageBox.warning(slicer.util.mainWindow(),
                               "Running", 'Exception!\n\n' + str(e) + "\n\nSee Python Console for Stack Trace")
    # if fissureVolume is not None:
    self.outputSelector.setCurrentNode(fissureVolume)
    SlicerUtil.changeLabelmapOpacity(0.5)
    self.onFourUpButton()
    # Restore the button and interaction state.
    self.applyButton.text = "Apply"
    self.applyButton.repaint()
    slicer.app.processEvents()
    self.applyButton.enabled = True
    applicationLogic = slicer.app.applicationLogic()
    interactionNode = applicationLogic.GetInteractionNode()
    interactionNode.Reset(None)
    self.visualizationWidget.pendingUpdate = False
    # Show the input CT with the new lobe label map in every slice view.
    for color in ['Red', 'Yellow', 'Green']:
        slicer.app.layoutManager().sliceWidget(color).sliceLogic().GetSliceCompositeNode().SetBackgroundVolumeID(
            CTNode.GetID())
        slicer.app.layoutManager().sliceWidget(color).sliceLogic().GetSliceCompositeNode().SetLabelVolumeID(
            self.outputSelector.currentNode().GetID())
def filterInputCT(self, input_node):
    """Run the pre-processing filter on input_node, updating the button label
    so the user sees progress while the (synchronous) filter runs."""
    # self.applyButton.enabled = False
    self.applyButton.text = "Filtering..."
    self.applyButton.repaint()
    slicer.app.processEvents()
    self.preProcessingWidget.filterInputCT(input_node)
def createLungLabelMap(self, input_node, label_node):
    """Create the lung label map.

    Runs the partial lung label-map step of the pre-processing widget on
    input_node, writing into label_node, then shows the result and makes
    it the current label selection.
    """
    self.applyButton.text = "Creating Label Map..."
    self.applyButton.repaint()
    slicer.app.processEvents()
    self.preProcessingWidget.createPartialLM(input_node, label_node)
    SlicerUtil.changeLabelmapOpacity(0.5)
    self.labelSelector.setCurrentNode(label_node)
def updateList(self):
    """Observe the mrml scene for changes that we wish to respond to."""
    tag = slicer.mrmlScene.AddObserver(slicer.mrmlScene.EndCloseEvent, self.clearTable)
    # NOTE(review): `tag` is immediately overwritten below, so the
    # EndCloseEvent observer tag is never stored in observerTags and
    # cannot be removed later -- confirm whether that is intentional.
    tag = slicer.mrmlScene.AddObserver(slicer.mrmlScene.NodeAddedEvent,
                                       self.visualizationWidget.requestNodeAddedUpdate)
    self.observerTags.append((slicer.mrmlScene, tag))
def clearTable(self, caller, event):
    """Scene-close observer: reset the fiducial table and visualization state."""
    self.onFourUpButton()
    self.visualizationWidget.tableWidget.clearContents()
    self.visualizationWidget.tableWidget.setRowCount(0)
    # Reset the per-column row counters kept by the visualization widget.
    self.visualizationWidget.leftRow = 0
    self.visualizationWidget.rightObliqueRow = 0
    self.visualizationWidget.rightHorizontalRow = 0
    self.visualizationWidget.updateScene()
    self.visualizationWidget.fiducialsCollapsibleButton.hide()
    self.visualizationWidget.removeFiducialObservers()
class ILSpqWidget(object):
    """
    A "QWidget"-like widget class that provides some helper
    functionality (pseudo signals and slots).
    """

    def __init__(self):
        # Maps a signal name (arbitrary string) to the list of connected slots.
        self.connections = {}

    def connect(self, signal, slot):
        """pseudo-connect - signal is arbitrary string and slot if callable"""
        self.connections.setdefault(signal, []).append(slot)

    def disconnect(self, signal, slot):
        """pseudo-disconnect - remove the connection if it exists"""
        slots = self.connections.get(signal)
        if slots is not None and slot in slots:
            slots.remove(slot)

    def emit(self, signal, args):
        """pseudo-emit - calls any slots connected to signal"""
        for slot in self.connections.get(signal, []):
            slot(*args)
class ILSVisualizationWidget(ILSpqWidget):
    """
    A "QWidget"-like class that manages some of the viewer options
    used during lobe segmentation.

    It owns the three-column fiducial table (left oblique / right oblique /
    right horizontal), the delete buttons, and the scene observers that keep
    the table synchronized with the markups lists named 'LO', 'RO' and 'RH'.
    """

    def __init__(self, logic, applyButton, buttonsList):
        super(ILSVisualizationWidget, self).__init__()
        self.logic = logic
        self.applyButton = applyButton
        # The three "add fiducial list" buttons, so their highlight can be reset.
        self.fiducialButtonsList = buttonsList
        self.widget = qt.QWidget()
        self.layout = qt.QFormLayout(self.widget)
        self.boxHolder = qt.QWidget()
        self.boxHolder.setLayout(qt.QVBoxLayout())
        self.layout.addRow(self.boxHolder)
        self.groupBox = qt.QFrame()
        self.groupBox.setLayout(qt.QHBoxLayout())
        self.fiducialsCollapsibleButton = ctk.ctkCollapsibleButton()
        self.fiducialsCollapsibleButton.text = "Show Fiducials"
        self.fiducialsCollapsibleButton.hide()
        self.fiducialsFormLayout = qt.QFormLayout(self.fiducialsCollapsibleButton)
        # Table Widget Definition: one column per fissure fiducial list.
        self.tableWidget = qt.QTableWidget()
        self.tableWidget.sortingEnabled = False
        self.tableWidget.hide()
        self.tableWidget.setColumnCount(3)
        self.tableWidget.setColumnWidth(0, 190)
        self.tableWidget.setColumnWidth(1, 190)
        self.tableWidget.setColumnWidth(2, 190)
        self.tableWidget.setMaximumWidth(590)
        horizontalBar = self.tableWidget.horizontalScrollBar()
        horizontalBar.setDisabled(True)
        horizontalBar.hide()
        self.tableWidget.setHorizontalHeaderLabels(
            ["Left Oblique Fiducials", "Right Oblique Fiducials", "Right Horizontal Fiducials"])
        behavior = qt.QAbstractItemView()
        self.tableWidget.setSelectionBehavior(behavior.SelectItems)
        self.tableWidget.setSelectionMode(behavior.SingleSelection)
        # 3 == qt.Qt.CustomContextMenu: enables the right-click delete menu.
        self.tableWidget.setContextMenuPolicy(3)
        self.tableWidget.customContextMenuRequested.connect(self.onRightClick)
        self.groupBox.layout().addWidget(self.tableWidget)
        self.fiducialsFormLayout.addWidget(self.groupBox)
        self.boxHolder.layout().addWidget(self.fiducialsCollapsibleButton)
        # Guards against re-entrant scene-update callbacks.
        self.pendingUpdate = False
        self.updatingFiducials = False
        self.observerTags = []
        # Next free row index for each of the three table columns.
        self.leftRow = 0
        self.rightObliqueRow = 0
        self.rightHorizontalRow = 0
        self.tableItems = []
        self.deletionGroupBox = qt.QFrame()
        self.deletionGroupBox.setLayout(qt.QHBoxLayout())
        self.fiducialsFormLayout.addWidget(self.deletionGroupBox)
        #
        # Delete Selected Fiducials Button
        #
        self.deleteButton = qt.QPushButton("Delete Selected Fiducial")
        self.deleteButton.toolTip = "Select a fiducial from the table and push this button to delete the selected fiducial from the scene."
        self.deleteButton.enabled = True
        selectedIcon = qt.QIcon(":/Icons/MarkupsDelete.png")
        self.deleteButton.setIcon(selectedIcon)
        self.deleteButton.setFixedSize(220, 30)
        self.deletionGroupBox.layout().addWidget(self.deleteButton)
        self.deleteButton.connect('clicked(bool)', self.onDeleteOneFiducialButton)
        #
        # Delete All Fiducials Button
        #
        self.deleteAllButton = qt.QPushButton("Delete All Fiducials")
        self.deleteAllButton.toolTip = "Delete all fiducials in the scene."
        self.deleteAllButton.enabled = True
        allIcon = qt.QIcon(":/Icons/MarkupsDeleteAllRows.png")
        self.deleteAllButton.setIcon(allIcon)
        self.deleteAllButton.setFixedSize(220, 30)
        self.deletionGroupBox.layout().addWidget(self.deleteAllButton)
        # self.fiducialsFormLayout.addRow(self.deleteAllButton)
        self.deleteAllButton.connect('clicked(bool)', self.dialogBoxFunction)

    def onDeleteOneFiducialButton(self):
        """Delete the fiducial selected in the table from its markups list,
        compact the table column, and refresh the scene state."""
        selectedItem = self.tableWidget.selectedItems()
        if not selectedItem:
            return
        item = selectedItem[0]
        column = item.column()
        row = item.row()
        listsInScene = slicer.util.getNodes('vtkMRMLMarkupsFiducialNode*')
        if not listsInScene:
            return
        # The table column determines which markups list the fiducial lives in.
        name = None
        if column == 0:
            name = 'LO'
        elif column == 1:
            name = 'RO'
        else:
            name = 'RH'
        selectedList = None
        for selectedList in list(listsInScene.values()):
            if selectedList.GetName() == name:
                # Table rows are in placement order, so the row index is also
                # the markup index inside the list.
                selectedList.RemoveMarkup(row)
                break
        if row == self.tableWidget.rowCount - 1:
            self.tableWidget.takeItem(row, column)
        else:
            # Shift the remaining items of this column up by one row.
            count = self.tableWidget.rowCount - row
            for i in range(1, count):
                currentRow = row + i
                moved = self.tableWidget.takeItem(currentRow, column)
                self.tableWidget.setItem(currentRow - 1, column, moved)
        if column == 0:
            self.leftRow -= 1
        elif column == 1:
            self.rightObliqueRow -= 1
        else:
            self.rightHorizontalRow -= 1
        # Shrink the table to the tallest remaining column.
        if self.leftRow >= self.rightObliqueRow and self.leftRow >= self.rightHorizontalRow:
            self.tableWidget.setRowCount(self.leftRow)
        elif self.rightObliqueRow >= self.leftRow and self.rightObliqueRow >= self.rightHorizontalRow:
            self.tableWidget.setRowCount(self.rightObliqueRow)
        elif self.rightHorizontalRow >= self.leftRow and self.rightHorizontalRow >= self.rightObliqueRow:
            self.tableWidget.setRowCount(self.rightHorizontalRow)
        self.updateScene()
        # Re-enable Apply only if at least one fissure list still has fiducials.
        listsInScene = slicer.util.getNodes('vtkMRMLMarkupsFiducialNode*')
        name = ['LO', 'RO', 'RH']
        if (listsInScene):
            for fiducialList in list(listsInScene.values()):
                if fiducialList.GetName() == name[0] and fiducialList.GetNumberOfFiducials() > 0:
                    self.applyButton.enabled = True
                elif fiducialList.GetName() == name[1] and fiducialList.GetNumberOfFiducials() > 0:
                    self.applyButton.enabled = True
                elif fiducialList.GetName() == name[2] and fiducialList.GetNumberOfFiducials() > 0:
                    self.applyButton.enabled = True

    def onRightClick(self):
        """Context menu on the table offering deletion of the highlighted fiducial."""
        menu = qt.QMenu()
        position = qt.QCursor.pos()
        action = qt.QAction("Delete highlighted fiducial(s)", menu)
        menu.addAction(action)
        connectObject = qt.QObject()
        connectObject.connect(action, 'triggered()', self.onDeleteOneFiducialButton)
        action2 = qt.QAction("Cancel", menu)
        menu.addAction(action2)
        connectObject.connect(action2, 'triggered()', menu.hide)
        menu.exec_(position)

    def dialogBoxFunction(self):
        """Show a small YES/NO confirmation dialog before deleting all fiducials."""
        self.deleteAllMsgBox = qt.QDialog(slicer.util.mainWindow())
        # self.deleteAllMsgBox.setWindowTitle("Delete All Fiducials?")
        self.deleteAllMsgBox.setFixedSize(200, 100)
        self.deleteAllMsgBox.show()
        self.deleteAllMsgBox.setLayout(qt.QVBoxLayout())
        messageLabel = qt.QLabel("Delete All Fiducials?")
        font = qt.QFont()
        font.setPointSize(10)
        messageLabel.setFont(font)
        self.deleteAllMsgBox.layout().addWidget(messageLabel, 0, 4)
        yesNoBox = qt.QFrame()
        yesNoBox.setLayout(qt.QHBoxLayout())
        self.deleteAllMsgBox.layout().addWidget(yesNoBox, 0, 4)
        #
        # OK button
        #
        okButton = qt.QPushButton()
        okButton.setText("YES")
        okButton.enabled = True
        okIcon = qt.QIcon(":/Icons/AnnotationOkDone.png")
        okButton.setIcon(okIcon)
        yesNoBox.layout().addWidget(okButton)
        #
        # NO button
        #
        noButton = qt.QPushButton()
        noButton.setText("NO")
        noButton.enabled = True
        noIcon = qt.QIcon(":/Icons/AnnotationCancel.png")
        noButton.setIcon(noIcon)
        yesNoBox.layout().addWidget(noButton)
        # Connections
        okButton.connect("clicked()", self.onDeleteAllButton)
        noButton.connect("clicked()", self.deleteAllMsgBox.hide)

    def onDeleteAllButton(self):
        """Remove every markups fiducial list from the scene and empty the table."""
        mrmlScene = slicer.mrmlScene
        listsInScene = slicer.util.getNodes('vtkMRMLMarkupsFiducialNode*')
        if (listsInScene):
            for oldList in list(listsInScene.values()):
                oldList.RemoveAllMarkups()
                mrmlScene.RemoveNode(oldList)
        rowCount = self.tableWidget.rowCount
        # NOTE(review): iterates rowCount..0 inclusive; index == rowCount does
        # not exist (removeRow on it is a no-op in Qt) -- confirm off-by-one.
        for i in range(rowCount, -1, -1):
            self.tableWidget.removeRow(i)
        self.leftRow = 0
        self.rightObliqueRow = 0
        self.rightHorizontalRow = 0
        self.updateScene()
        self.deleteAllMsgBox.hide()

    def updateScene(self):
        """Reset interaction/highlight state and drop fissure lists whose
        table column is empty; disable the delete buttons when the table is empty."""
        applicationLogic = slicer.app.applicationLogic()
        interactionNode = applicationLogic.GetInteractionNode()
        interactionNode.Reset(None)
        self.applyButton.enabled = False
        for i in self.fiducialButtonsList:
            i.setStyleSheet("background-color: rgb(255,255,255)")
        listsInScene = slicer.util.getNodes('vtkMRMLMarkupsFiducialNode*')
        if (listsInScene):
            mrmlScene = slicer.mrmlScene
            for oldList in list(listsInScene.values()):
                if oldList.GetName() == 'LO':
                    if self.leftRow == 0:
                        mrmlScene.RemoveNode(oldList)
                if oldList.GetName() == 'RO':
                    if self.rightObliqueRow == 0:
                        mrmlScene.RemoveNode(oldList)
                if oldList.GetName() == 'RH':
                    if self.rightHorizontalRow == 0:
                        mrmlScene.RemoveNode(oldList)
        if self.tableWidget.rowCount == 0:
            self.deleteButton.enabled = False
            self.deleteAllButton.enabled = False

    def updateFiducialArray(self):
        """Rebuild the list of buttons based on current landmarks"""
        fiducialsLogic = slicer.modules.markups.logic()
        originalActiveListID = fiducialsLogic.GetActiveListID()
        originalActiveList = SlicerUtil.getNode(originalActiveListID)
        if originalActiveList:
            if originalActiveList.GetNumberOfFiducials() > 0:
                self.updateTable()
                self.applyButton.enabled = True
        self.addFiducialObservers()

    def addFiducialObservers(self):
        """Add observers to all fiducialLists in scene
        so we will know when new markups are added
        """
        self.removeFiducialObservers()
        for fiducialList in list(slicer.util.getNodes('vtkMRMLMarkupsFiducialNode*').values()):
            tag = fiducialList.AddObserver(fiducialList.PointPositionDefinedEvent, self.requestNodeAddedUpdate)
            self.observerTags.append((fiducialList, tag))

    def removeFiducialObservers(self):
        """Remove any existing observers"""
        for obj, tag in self.observerTags:
            obj.RemoveObserver(tag)
        self.observerTags = []

    def updateTable(self):
        """Append the most recently placed fiducial of the active list to its
        table column, growing the table when that column is the tallest.

        The three branches are parallel: each handles one list name ('LO',
        'RO', 'RH') mapping to table column 0, 1, 2 respectively.
        """
        self.fiducialsCollapsibleButton.show()
        fiducialsLogic = slicer.modules.markups.logic()
        originalActiveListID = fiducialsLogic.GetActiveListID()
        originalActiveList = SlicerUtil.getNode(originalActiveListID)
        self.tableWidget.show()
        self.deleteButton.enabled = True
        self.deleteAllButton.enabled = True
        if originalActiveList.GetName() == 'LO':
            # Grow the table only if the left column is (or becomes) the tallest.
            if self.tableWidget.rowCount == 0:
                self.tableWidget.setRowCount(1)
            elif self.leftRow >= self.rightObliqueRow and self.leftRow >= self.rightHorizontalRow:
                self.tableWidget.setRowCount(self.leftRow + 1)
            elif self.rightObliqueRow > self.leftRow and self.rightObliqueRow > self.rightHorizontalRow:
                self.tableWidget.setRowCount(self.rightObliqueRow)
            elif self.rightHorizontalRow > self.leftRow and self.rightHorizontalRow > self.rightObliqueRow:
                self.tableWidget.setRowCount(self.rightHorizontalRow)
            lastElement = originalActiveList.GetNumberOfFiducials() - 1
            item = qt.QTableWidgetItem(originalActiveList.GetNthFiducialLabel(lastElement))
            item.setToolTip(originalActiveList.GetName())
            self.tableItems.append(item)
            self.tableWidget.setItem(self.leftRow, 0, item)
            self.leftRow += 1
        elif originalActiveList.GetName() == 'RO':
            if self.tableWidget.rowCount == 0:
                self.tableWidget.setRowCount(1)
            elif self.leftRow > self.rightObliqueRow and self.leftRow > self.rightHorizontalRow:
                self.tableWidget.setRowCount(self.leftRow)
            elif self.rightObliqueRow >= self.leftRow and self.rightObliqueRow >= self.rightHorizontalRow:
                self.tableWidget.setRowCount(self.rightObliqueRow + 1)
            elif self.rightHorizontalRow > self.leftRow and self.rightHorizontalRow > self.rightObliqueRow:
                self.tableWidget.setRowCount(self.rightHorizontalRow)
            lastElement = originalActiveList.GetNumberOfFiducials() - 1
            item = qt.QTableWidgetItem(originalActiveList.GetNthFiducialLabel(lastElement))
            item.setToolTip(originalActiveList.GetName())
            self.tableItems.append(item)
            self.tableWidget.setItem(self.rightObliqueRow, 1, item)
            self.rightObliqueRow += 1
        elif originalActiveList.GetName() == 'RH':
            if self.tableWidget.rowCount == 0:
                self.tableWidget.setRowCount(1)
            elif self.leftRow > self.rightObliqueRow and self.leftRow > self.rightHorizontalRow:
                self.tableWidget.setRowCount(self.leftRow)
            elif self.rightObliqueRow > self.leftRow and self.rightObliqueRow > self.rightHorizontalRow:
                self.tableWidget.setRowCount(self.rightObliqueRow)
            elif self.rightHorizontalRow >= self.leftRow and self.rightHorizontalRow >= self.rightObliqueRow:
                self.tableWidget.setRowCount(self.rightHorizontalRow + 1)
            lastElement = originalActiveList.GetNumberOfFiducials() - 1
            item = qt.QTableWidgetItem(originalActiveList.GetNthFiducialLabel(lastElement))
            item.setToolTip(originalActiveList.GetName())
            self.tableItems.append(item)
            self.tableWidget.setItem(self.rightHorizontalRow, 2, item)
            self.rightHorizontalRow += 1

    def requestNodeAddedUpdate(self, caller, event):
        """Start a SingleShot timer that will check the fiducials
        in the scene and add them to the list"""
        if not self.pendingUpdate:
            qt.QTimer.singleShot(0, self.wrappedNodeAddedUpdate)
            self.pendingUpdate = True

    def wrappedNodeAddedUpdate(self):
        # Wraps nodeAddedUpdate so exceptions surface in a dialog instead of
        # being silently swallowed by the Qt timer callback.
        try:
            self.nodeAddedUpdate()
        except Exception as e:
            import traceback
            traceback.print_exc()
            qt.QMessageBox.warning(slicer.util.mainWindow(),
                                   "Node Added", 'Exception!\n\n' + str(e) + "\n\nSee Python Console for Stack Trace")

    def nodeAddedUpdate(self):
        """Perform the update of any new fiducials.
        First collect from any fiducial lists not associated with one of our
        lists (like when the process first gets started) and then check for
        new fiducials added to one of our lists.
        End result should be one fiducial per list with identical names and
        correctly assigned associated node ids.
        Most recently created new fiducial is picked as active landmark.
        """
        if self.updatingFiducials:
            return
        self.updatingFiducials = True
        self.logic.ModifyList()
        self.updateFiducialArray()
        self.pendingUpdate = False
        self.updatingFiducials = False
#
# CIP_InteractiveLobeSegmentationLogic
#
class CIP_InteractiveLobeSegmentationLogic(ScriptedLoadableModuleLogic):
    """This class should implement all the actual
    computation done by your module. The interface
    should be such that other python code can import
    this class and make use of the functionality without
    requiring an instance of the Widget
    """

    def __init__(self):
        # Name of the fiducial list currently being edited ('LO', 'RO' or
        # 'RH'); the widget sets it before markups are placed.
        self.name = "Fiducial"

    def hasImageData(self, volumeNode):
        """Return True when volumeNode exists and carries image data."""
        if not volumeNode:
            print('no volume node')
            return False
        # `is None` is the idiomatic identity test (was `== None`).
        if volumeNode.GetImageData() is None:
            print('no image data')
            return False
        return True

    def delayDisplay(self, message, msec=1000):
        """Logic version of delay display: show `message` in a modal dialog
        that closes itself after `msec` milliseconds."""
        print(message)
        self.info = qt.QDialog()
        self.infoLayout = qt.QVBoxLayout()
        self.info.setLayout(self.infoLayout)
        self.label = qt.QLabel(message, self.info)
        self.infoLayout.addWidget(self.label)
        qt.QTimer.singleShot(msec, self.info.close)
        self.info.exec_()

    def createList(self, name):
        """Make the markups fiducial list called `name` the active placement
        target, creating it when it does not exist yet.

        Placement is switched to persistent mode so several fiducials can be
        dropped in a row without re-selecting the place tool.
        """
        applicationLogic = slicer.app.applicationLogic()
        selectionNode = applicationLogic.GetSelectionNode()
        selectionNode.SetReferenceActivePlaceNodeClassName("vtkMRMLMarkupsFiducialNode")
        interactionNode = applicationLogic.GetInteractionNode()
        interactionNode.SwitchToPersistentPlaceMode()
        fiducialsLogic = slicer.modules.markups.logic()
        listsInScene = slicer.util.getNodes('vtkMRMLMarkupsFiducialNode*')
        createNewList = True
        if listsInScene:
            for oldList in list(listsInScene.values()):
                if oldList.GetName() == name:
                    # Reuse the existing list instead of creating a duplicate.
                    fiducialsLogic.SetActiveListID(oldList)
                    createNewList = False
                    break
        if createNewList:
            # Either no fiducial lists exist yet, or none carries the
            # requested name (the original duplicated this branch verbatim
            # in an if/else; merged here).
            fiducialListNodeID = fiducialsLogic.AddNewFiducialNode("Fiducial", slicer.mrmlScene)
            fiducialList = SlicerUtil.getNode(fiducialListNodeID)
            fiducialsLogic.SetActiveListID(fiducialList)

    def ModifyList(self):
        """Stamp the active fiducial list with the widget-selected name.

        Newly placed markups land in the active list; renaming it (and its
        first fiducial) to self.name lets the table and the CLI find it.
        """
        fiducialsLogic = slicer.modules.markups.logic()
        originalActiveListID = fiducialsLogic.GetActiveListID()  # TODO: naming convention?
        fiducialList = SlicerUtil.getNode(originalActiveListID)
        if fiducialList:
            fiducialList.SetName(self.name)
            # NOTE(review): only fiducial 0 is relabelled -- presumably later
            # fiducials keep their auto-generated labels; confirm intent.
            fiducialList.SetNthFiducialLabel(0, self.name + "-1")
        else:
            return

    def run(self, labelVolume, outputVolume):
        """
        Run the actual algorithm.

        Collects the 'LO'/'RO'/'RH' fiducial lists from the scene, validates
        that the right-lung lists come in pairs, and invokes the
        SegmentLungLobes CLI synchronously. Returns the output volume on
        success, False when a required fiducial list is missing.
        """
        listsInScene = slicer.util.getNodes('vtkMRMLMarkupsFiducialNode*')
        leftObliqueFiducials = None
        rightObliqueFiducials = None
        rightHorizontalFiducials = None
        name = ['LO', 'RO', 'RH']
        if listsInScene:
            for fiducialList in list(listsInScene.values()):
                if fiducialList.GetName() == name[0]:
                    leftObliqueFiducials = fiducialList
                elif fiducialList.GetName() == name[1]:
                    rightObliqueFiducials = fiducialList
                elif fiducialList.GetName() == name[2]:
                    rightHorizontalFiducials = fiducialList
        parameters = {
            "inLabelMapFileName": labelVolume.GetID(),
            "outLabelMapFileName": outputVolume.GetID(),
        }
        if leftObliqueFiducials:
            parameters["leftObliqueFiducials"] = leftObliqueFiducials
        # Segmenting the right lung needs BOTH the oblique and the horizontal
        # fissure; warn and bail out if only one of the two was placed.
        if rightObliqueFiducials:
            if rightHorizontalFiducials:
                parameters["rightObliqueFiducials"] = rightObliqueFiducials
                parameters["rightHorizontalFiducials"] = rightHorizontalFiducials
            else:
                qt.QMessageBox.warning(slicer.util.mainWindow(),
                                       "Interactive Lobe Segmentation", "Please place fiducials on the right horizontal fissure.")
                return False
        elif rightHorizontalFiducials:
            qt.QMessageBox.warning(slicer.util.mainWindow(),
                                   "Interactive Lobe Segmentation", "Please place fiducials on the right oblique fissure.")
            return False
        slicer.cli.run(slicer.modules.segmentlunglobes, None, parameters, wait_for_completion=True)
        selectionNode = slicer.app.applicationLogic().GetSelectionNode()
        selectionNode.SetReferenceActiveLabelVolumeID(outputVolume.GetID())
        outputVolume.SetName(labelVolume.GetName().replace("_partialLungLabelMap", "_interactiveLobeSegmentation"))
        slicer.app.applicationLogic().PropagateLabelVolumeSelection(0)
        return outputVolume
|
{
"content_hash": "00e13140971a83a33954494c756225ca",
"timestamp": "",
"source": "github",
"line_count": 1028,
"max_line_length": 176,
"avg_line_length": 43.69357976653696,
"alnum_prop": 0.6588819377963799,
"repo_name": "acil-bwh/SlicerCIP",
"id": "026a169dd72bef0c18dfa6cc4b2bacf357109aad",
"size": "44917",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "Scripted/CIP_InteractiveLobeSegmentation/CIP_InteractiveLobeSegmentation.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "5995"
},
{
"name": "C++",
"bytes": "251883"
},
{
"name": "CMake",
"bytes": "65642"
},
{
"name": "CSS",
"bytes": "38951"
},
{
"name": "HTML",
"bytes": "6261"
},
{
"name": "Python",
"bytes": "3855137"
}
],
"symlink_target": ""
}
|
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# This work is licensed under the Creative Commons Attribution-NonCommercial
# 4.0 International License. To view a copy of this license, visit
# http://creativecommons.org/licenses/by-nc/4.0/ or send a letter to
# Creative Commons, PO Box 1866, Mountain View, CA 94042, USA.
from . import submission
from .submission.run_context import RunContext
from .submission.submit import SubmitTarget
from .submission.submit import PathType
from .submission.submit import SubmitConfig
from .submission.submit import get_path_from_template
from .submission.submit import submit_run
from .util import EasyDict
submit_config: SubmitConfig = None # Package level variable for SubmitConfig which is only valid when inside the run function.
|
{
"content_hash": "ae66dba832d0dc82144db742bf10e8b1",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 126,
"avg_line_length": 39.85,
"alnum_prop": 0.7979924717691342,
"repo_name": "microsoft/DiscoFaceGAN",
"id": "ad43827d8a279c4a797e09b51b8fd96e8e003ee6",
"size": "799",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dnnlib/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "380445"
}
],
"symlink_target": ""
}
|
class InputCell(object):
    """A reactive cell whose value is set directly by the user.

    Downstream ComputeCells read `value`; assigning a new value makes any
    dependent ComputeCell recompute on its next access.
    """

    def __init__(self, initial_value):
        # Store the supplied value (the stub discarded it and kept None).
        self.value = initial_value
class ComputeCell(object):
    """A cell whose value is derived from other cells.

    `value` is computed lazily from the current values of `inputs` via
    `compute_function` (which receives the list of input values), so it is
    always up to date without explicit propagation.  Callbacks registered
    with add_callback() are invoked by fire_callbacks() whenever the
    computed value has changed since the last notification.
    """

    def __init__(self, inputs, compute_function):
        # The stub stored nothing and always returned None.
        self.inputs = inputs
        self.compute_function = compute_function
        self._callbacks = []
        # Remember the initial value so callbacks only fire on real changes.
        self._last_value = self.value

    @property
    def value(self):
        """Current derived value, recomputed from the inputs on each access."""
        return self.compute_function([cell.value for cell in self.inputs])

    def add_callback(self, callback):
        """Register `callback(new_value)` to be notified of value changes."""
        self._callbacks.append(callback)

    def remove_callback(self, callback):
        """Unregister a callback; unknown callbacks are ignored."""
        if callback in self._callbacks:
            self._callbacks.remove(callback)

    def fire_callbacks(self):
        """Invoke registered callbacks if the value changed since last check."""
        new_value = self.value
        if new_value != self._last_value:
            self._last_value = new_value
            for callback in self._callbacks:
                callback(new_value)
|
{
"content_hash": "c83c152193fc2b10999322ebc301cd69",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 49,
"avg_line_length": 21.571428571428573,
"alnum_prop": 0.6158940397350994,
"repo_name": "N-Parsons/exercism-python",
"id": "c7de0e007844583b1b0d121ecf5ce910aee3c018",
"size": "302",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "exercises/react/react.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "555991"
},
{
"name": "Shell",
"bytes": "1199"
}
],
"symlink_target": ""
}
|
import sys
import json
import string
def hw():
    """Print a hello-world greeting (smoke test for the script)."""
    # Parenthesized print works on both Python 2 and 3; the bare
    # `print 'Hello, world!'` statement is a SyntaxError on Python 3.
    print('Hello, world!')
def lines(fp):
    """Print the number of lines remaining in the open file object `fp`."""
    # Parenthesized print keeps this valid on Python 3 (the original used
    # the Python-2-only print statement).
    print(str(len(fp.readlines())))
def sent_dict(ssf):
    """
    #Creates a dictionary from the sentiment input file
    """
    # Each line is "term<TAB>score"; int() tolerates the trailing newline.
    return {term: int(score)
            for term, score in (entry.split("\t") for entry in ssf)}
def assign_sentiment(sf, tf):
    """
    Reads and parses twitter output. Finds tweet text and tries to find any
    sentiment words in it, printing one float score per input tweet.

    `sf` is the sentiment lexicon file ("term<TAB>score" lines) and `tf` an
    iterable of JSON-encoded tweet lines.  Deleted/empty tweets (no "text"
    field) are scored as the placeholder string "0" so the printed scores
    stay aligned with the input lines.
    """
    tweets = []
    for line in tf:
        row = json.loads(line)
        tweets.append(row["text"] if "text" in row else "0")
    dictionary = sent_dict(sf)
    for tweet in tweets:
        # As before: strip trailing punctuation from the whole tweet only,
        # then split on single spaces.
        words = tweet.rstrip('?:!.,;').split(" ")
        sent_score = 0.0
        for word in words:
            # O(1) dict lookup replaces the original scan over the whole
            # lexicon for every word (O(words * lexicon)).
            sent_score += float(dictionary.get(word, 0))
        print(sent_score)
def main():
    """Entry point: ``tweet_sentiment.py <sentiment_file> <tweet_file>``."""
    # Context managers guarantee both handles are closed (the original
    # opened them with open() and never closed either).
    with open(sys.argv[1]) as sent_file, open(sys.argv[2]) as tweet_file:
        assign_sentiment(sent_file, tweet_file)


if __name__ == '__main__':
    main()
|
{
"content_hash": "3fcedd8f70ea8df970222cca753e1d32",
"timestamp": "",
"source": "github",
"line_count": 71,
"max_line_length": 98,
"avg_line_length": 21.323943661971832,
"alnum_prop": 0.5429326287978864,
"repo_name": "jbagd/twitter-sentiment",
"id": "35080718d379c030f5ef33413fbeec042be2b7de",
"size": "1514",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tweet_sentiment.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "8903"
}
],
"symlink_target": ""
}
|
import time
import requests
class AnticaptchaError(Exception):
    """Raised when the anti-captcha.com API response carries an errorId."""
    pass
class Anticaptcha(object):
    """Minimal client for the anti-captcha.com JSON API (proxyless NoCaptcha)."""

    base_url = "https://api.anti-captcha.com"
    create_task_url = base_url + "/createTask"
    get_task_result_url = base_url + "/getTaskResult"
    language_pool = "en"
    soft_id = 850
    # Seconds to wait between task-result polls.
    poll_interval = 2

    def __init__(self, api_key):
        self.api_key = api_key
        self.session = requests.Session()

    def _post(self, url, **kwargs):
        """POST and decode JSON, raising AnticaptchaError when the API
        reports a non-zero errorId."""
        response = self.session.post(url, **kwargs).json()
        if response.get("errorId"):
            raise AnticaptchaError(
                "%s: %s" % (response.get("errorCode"), response.get("errorDescription"))
            )
        return response

    def _create_task(self, url, site_key):
        """Submit a NoCaptchaTaskProxyless task; return the new task id."""
        data = {
            "clientKey": self.api_key,
            "task": {
                "type": "NoCaptchaTaskProxyless",
                "websiteURL": url,
                "websiteKey": site_key,
            },
            "softId": self.soft_id,
            "languagePool": self.language_pool,
        }
        response = self._post(self.create_task_url, json=data)
        return response.get("taskId")

    def _get_task_result(self, task_id):
        """Fetch the current result record for `task_id`."""
        data = {"clientKey": self.api_key, "taskId": task_id}
        return self._post(self.get_task_result_url, json=data)

    def _wait_for_task_result(self, task_id):
        """Poll until the task status is "ready" and return the result.

        Iterative on purpose: the original recursed once per 2-second poll
        and would hit Python's recursion limit on long-running tasks.
        """
        while True:
            result = self._get_task_result(task_id)
            if result.get("status") == "ready":
                return result
            time.sleep(self.poll_interval)

    def get_recaptcha_response(self, url, site_key):
        """Solve the reCAPTCHA at url/site_key; return the g-recaptcha token."""
        task_id = self._create_task(url, site_key)
        result = self._wait_for_task_result(task_id)
        return result["solution"]["gRecaptchaResponse"]
|
{
"content_hash": "87a2ed1de67eaf4f54b93fe6be3aa948",
"timestamp": "",
"source": "github",
"line_count": 58,
"max_line_length": 88,
"avg_line_length": 30.56896551724138,
"alnum_prop": 0.5707839819514946,
"repo_name": "bogdal/freepacktbook",
"id": "c921ae71340995fc7b2a0f439a5fe557e8d962ec",
"size": "1773",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "freepacktbook/anticaptcha.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "387"
},
{
"name": "Python",
"bytes": "21478"
}
],
"symlink_target": ""
}
|
import synapse.lib.cli as s_cli
from synapse.tests.common import *
class TstThrowCmd(s_cli.Cmd):
    '''
    Throw an exception.
    '''
    _cmd_name = 'throwzero'
    def runCmdOpts(self, opts):
        # Intentionally raises ZeroDivisionError so tests can exercise the
        # CLI loop's handling of an unexpected exception from a command.
        ret = 1 / 0
class TstThrowKeyboard(s_cli.Cmd):
    '''
    Throw an exception.
    '''
    _cmd_name = 'throwkeyboard'
    def runCmdOpts(self, opts):
        # Simulates a ctrl-c during command execution so tests can verify
        # the CLI loop catches KeyboardInterrupt without terminating.
        raise KeyboardInterrupt('TstThrowKeyboard')
class CliTest(SynTest):
    """Tests for synapse.lib.cli: prompt handling, option parsing and the
    interactive command loop (driven via a mocked get_input)."""
    def test_cli_prompt(self):
        """The prompt defaults to 'cli> ' and honors the cmdprompt attribute."""
        outp = self.getTestOutp()
        with s_cli.Cli(None, outp=outp) as cli:
            self.eq(cli.getCmdPrompt(), 'cli> ')
            cli.cmdprompt = 'hehe> '
            self.eq(cli.getCmdPrompt(), 'hehe> ')
    def test_cli_get_set(self):
        """Local variables can be seeded via kwargs, set/get, and listed via 'locs'."""
        outp = self.getTestOutp()
        with s_cli.Cli(None, outp=outp, hehe='haha') as cli:
            self.eq(cli.get('hehe'), 'haha')
            self.none(cli.get('foo'))
            cli.set('foo', 'bar')
            self.eq(cli.get('foo'), 'bar')
            cli.runCmdLine('locs')
            self.true(outp.expect('hehe'))
            self.true(outp.expect('haha'))
            self.true(outp.expect('foo'))
            self.true(outp.expect('bar'))
    def test_cli_quit(self):
        """The 'quit' command finis the Cli object."""
        outp = self.getTestOutp()
        with s_cli.Cli(None, outp=outp) as cli:
            cli.runCmdLine('quit')
            self.true(cli.isfini)
    def test_cli_help(self):
        """'help' prints the brief for registered commands."""
        outp = self.getTestOutp()
        with s_cli.Cli(None, outp=outp) as cli:
            cli.runCmdLine('help')
            self.true(outp.expect('Quit the current command line interpreter.'))
    def test_cli_notacommand(self):
        """Unknown command names produce a 'cmd not found' message."""
        outp = self.getTestOutp()
        with s_cli.Cli(None, outp=outp) as cli:
            cli.runCmdLine('notacommand')
            self.true(outp.expect('cmd not found: notacommand'))
    def test_cli_cmdret(self):
        """runCmdLine returns the value returned by the command's runCmdOpts."""
        class WootCmd(s_cli.Cmd):
            _cmd_name = 'woot'
            def runCmdOpts(self, opts):
                return 20
        with s_cli.Cli(None) as cli:
            cli.addCmdClass(WootCmd)
            self.eq(cli.runCmdLine('woot'), 20)
    def test_cli_cmd(self):
        """Registered commands expose doc and brief strings."""
        with s_cli.Cli(None) as cli:
            quit = cli.getCmdByName('quit')
            self.nn(quit.getCmdDoc())
            self.nn(quit.getCmdBrief())
    def test_cli_opts_flag(self):
        """A bare '--flag' option parses to True; a 'valu' arg takes one token."""
        with s_cli.Cli(None) as cli:
            quit = cli.getCmdByName('quit')
            quit._cmd_syntax = (
                ('--bar', {}),
                ('haha', {'type': 'valu'}),
            )
            opts = quit.getCmdOpts('quit --bar hoho')
            self.eq(opts.get('bar'), True)
            self.eq(opts.get('haha'), 'hoho')
    def test_cli_opts_list(self):
        """A 'list' positional consumes remaining tokens, honoring quotes."""
        with s_cli.Cli(None) as cli:
            quit = cli.getCmdByName('quit')
            quit._cmd_syntax = (
                ('--bar', {}),
                ('haha', {'type': 'list'}),
            )
            opts = quit.getCmdOpts('quit --bar hoho haha "hehe hehe"')
            self.eq(opts.get('bar'), True)
            self.eq(tuple(opts.get('haha')), ('hoho', 'haha', 'hehe hehe'))
    def test_cli_opts_glob(self):
        """A 'glob' positional captures the rest of the line as one string."""
        with s_cli.Cli(None) as cli:
            quit = cli.getCmdByName('quit')
            quit._cmd_syntax = (
                ('--bar', {}),
                ('haha', {'type': 'glob'}),
            )
            opts = quit.getCmdOpts('quit --bar hoho lulz')
            self.eq(opts.get('bar'), True)
            self.eq(opts.get('haha'), 'hoho lulz')
    def test_cli_opts_defval(self):
        """An option's 'defval' is used when the option is not supplied."""
        with s_cli.Cli(None) as cli:
            quit = cli.getCmdByName('quit')
            quit._cmd_syntax = (
                ('--bar', {'type': 'valu', 'defval': 'lol'}),
                ('haha', {'type': 'glob'}),
            )
            opts = quit.getCmdOpts('quit hoho lulz')
            self.eq(opts.get('bar'), 'lol')
    def test_cli_opts_parse_valu(self):
        """A 'valu' option takes exactly one token; extra tokens are an error."""
        with s_cli.Cli(None) as cli:
            quit = cli.getCmdByName('quit')
            quit._cmd_syntax = (
                ('--bar', {'type': 'valu'}),
            )
            opts = quit.getCmdOpts('quit --bar woah')
            self.eq(opts.get('bar'), 'woah')
            self.raises(BadSyntaxError, quit.getCmdOpts, 'quit --bar woah this is too much text')
    def test_cli_opts_parse_list(self):
        """A 'list' option accepts quoted CSV, storm list syntax, or a single value."""
        with s_cli.Cli(None) as cli:
            quit = cli.getCmdByName('quit')
            quit._cmd_syntax = (
                ('--bar', {'type': 'list'}),
            )
            # The list must be quoted
            opts = quit.getCmdOpts('quit --bar "1,2,3"')
            self.eq(opts.get('bar'), ['1', '2', '3'])
            # Or encapsulated in a storm list syntax
            opts = quit.getCmdOpts('quit --bar (1, 2, 3)')
            self.eq(opts.get('bar'), [1, 2, 3])
            # A single item is fine
            opts = quit.getCmdOpts('quit --bar woah')
            self.eq(opts.get('bar'), ['woah'])
    def test_cli_opts_parse_enums(self):
        """An 'enum' option accepts only the declared values."""
        with s_cli.Cli(None) as cli:
            quit = cli.getCmdByName('quit')
            quit._cmd_syntax = (
                ('--bar', {'type': 'enum', 'enum:vals': ('foo', 'baz')}),
            )
            opts = quit.getCmdOpts('quit --bar foo')
            self.eq(opts.get('bar'), 'foo')
            opts = quit.getCmdOpts('quit --bar baz')
            self.eq(opts.get('bar'), 'baz')
            self.raises(BadSyntaxError, quit.getCmdOpts, 'quit --bar')
            self.raises(BadSyntaxError, quit.getCmdOpts, 'quit --bar bar')
    def test_cli_opts_parse_kwlist(self):
        """A 'kwlist' positional parses key=value pairs into (key, value) tuples."""
        with s_cli.Cli(None) as cli:
            quit = cli.getCmdByName('quit')
            quit._cmd_syntax = (
                ('bar', {'type': 'kwlist'}),
            )
            opts = quit.getCmdOpts('quit hehe=haha')
            self.eq(opts.get('bar'), [('hehe', 'haha')])
    def test_cli_cmd_loop_quit(self):
        """The interactive loop exits cleanly when the user types 'quit'."""
        outp = self.getTestOutp()
        cmdg = CmdGenerator(['help', 'quit'])
        with mock.patch('synapse.lib.cli.get_input', cmdg) as p:
            with s_cli.Cli(None, outp) as cli:
                cli.runCmdLoop()
                self.eq(cli.isfini, True)
        self.true(outp.expect('o/'))
    def test_cli_cmd_loop_eof(self):
        """EOF on input terminates the loop without printing the goodbye."""
        outp = self.getTestOutp()
        cmdg = CmdGenerator(['help'], on_end=EOFError)
        with mock.patch('synapse.lib.cli.get_input', cmdg) as p:
            with s_cli.Cli(None, outp) as cli:
                cli.runCmdLoop()
                self.eq(cli.isfini, True)
        self.false(outp.expect('o/', throw=False))
    def test_cli_cmd_loop_bad_input(self):
        """Non-string input surfaces the resulting traceback to the output."""
        outp = self.getTestOutp()
        cmdg = CmdGenerator([1234], on_end=EOFError)
        with mock.patch('synapse.lib.cli.get_input', cmdg) as p:
            with s_cli.Cli(None, outp) as cli:
                cli.runCmdLoop()
                self.eq(cli.isfini, True)
        self.true(outp.expect("AttributeError: 'int' object has no attribute 'strip'", throw=False))
    def test_cli_cmd_loop_keyint(self):
        """ctrl-c at the prompt prints '<ctrl-c>' and keeps the loop alive."""
        outp = self.getTestOutp()
        cmdg = CmdGenerator(['help'], on_end=KeyboardInterrupt)
        data = {'count': 0}
        def _onGetInput(mesg):
            # After a couple of interrupts, queue 'quit' so the loop ends.
            data['count'] = data['count'] + 1
            if data['count'] > 2:
                cmdg.addCmd('quit')
        with mock.patch('synapse.lib.cli.get_input', cmdg) as p:
            with s_cli.Cli(None, outp) as cli:
                cli.on('cli:getinput', _onGetInput)
                cli.runCmdLoop()
                self.eq(cli.isfini, True)
        self.true(outp.expect('<ctrl-c>'))
    def test_cli_cmd_loop(self):
        """End-to-end loop: blank lines are ignored and command exceptions
        (including KeyboardInterrupt) are reported without killing the loop."""
        outp = self.getTestOutp()
        cmdg = CmdGenerator(['help',
                             'locs',
                             '',
                             '    ',
                             'throwzero',
                             'throwkeyboard',
                             'quit',
                             ])
        with mock.patch('synapse.lib.cli.get_input', cmdg) as p:
            with s_cli.Cli(None, outp) as cli:
                cli.addCmdClass(TstThrowCmd)
                cli.addCmdClass(TstThrowKeyboard)
                cli.runCmdLoop()
        self.true(outp.expect('o/'))
        self.true(outp.expect('{}'))
        self.true(outp.expect('ZeroDivisionError'))
        self.true(outp.expect('<ctrl-c>'))
        self.true(cli.isfini)
|
{
"content_hash": "39ead391023bd14ea132af924f0aae0f",
"timestamp": "",
"source": "github",
"line_count": 274,
"max_line_length": 100,
"avg_line_length": 30.82116788321168,
"alnum_prop": 0.4966252220248668,
"repo_name": "vivisect/synapse",
"id": "4e4dcb27e65c7b4095e0733171b8adefe99cb415",
"size": "8445",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "synapse/tests/test_lib_cli.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "716598"
}
],
"symlink_target": ""
}
|
def main():
    """Extraction script: equilibrate, then isolate the diode coldfinger.

    NOTE(review): info/close/open/sleep are injected by the pyscript
    runtime, not imported here — confirm against the script executor.
    """
    # equilibrate then isolate cold finger
    info('Equilibrate then isolate coldfinger')
    # Shut the turbo path before opening to the diode laser line.
    close(name="C", description="Bone to Turbo")
    sleep(1)
    # Open to the diode laser line and let it equilibrate for 20 s.
    open(name="B", description="Bone to Diode Laser")
    sleep(20)
    # Re-isolate the coldfinger.
    close(name="B", description="Bone to Diode Laser")
|
{
"content_hash": "914f09486bb3c2711fcd97844224d74e",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 54,
"avg_line_length": 35.875,
"alnum_prop": 0.6724738675958188,
"repo_name": "NMGRL/pychron",
"id": "70135f5b49049c75cfb17ec24f3f02b4f1a805aa",
"size": "287",
"binary": false,
"copies": "3",
"ref": "refs/heads/main",
"path": "docs/user_guide/operation/scripts/examples/helix/extraction/felix/EquilibrateThenIsolateDiodeColdfinger.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "128"
},
{
"name": "C++",
"bytes": "3706"
},
{
"name": "CSS",
"bytes": "263"
},
{
"name": "Cython",
"bytes": "1692"
},
{
"name": "Fortran",
"bytes": "455875"
},
{
"name": "HTML",
"bytes": "46796"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Processing",
"bytes": "11421"
},
{
"name": "Python",
"bytes": "10773692"
},
{
"name": "Shell",
"bytes": "1003"
}
],
"symlink_target": ""
}
|
from django.contrib import admin
from django.conf import settings
from blog.models import Post, Blog, BlogUserAccess
class BlogUserAccessInline(admin.TabularInline):
    """Edit per-user access rows inline on the Blog change page."""
    model = BlogUserAccess
    # raw_id widget avoids rendering a select of every user on large sites.
    raw_id_fields = ('user', )
class BlogAdmin(admin.ModelAdmin):
    """Admin for Blog: auto-fills the slug and inlines user access."""
    prepopulated_fields = {'slug': ('name',)}
    inlines = [
        BlogUserAccessInline,
    ]
class PostAdmin(admin.ModelAdmin):
    """Admin for Post with list filtering and full-text search fields."""
    list_display = ('title', 'author', 'publish', 'status')
    list_filter = ('publish', 'status')
    search_fields = ('title', 'body', 'tease')
    # Only expose the rating raw-id widget when the optional 'voter' app
    # is installed; otherwise the field does not exist on the model form.
    if 'voter' in settings.INSTALLED_APPS:
        raw_id_fields = ('author', 'rating')
    else:
        raw_id_fields = ('author', )
# Register the blog models with their customized admin classes.
admin.site.register(Blog, BlogAdmin)
admin.site.register(Post, PostAdmin)
|
{
"content_hash": "20539567676118272ce036564e2d284d",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 59,
"avg_line_length": 26.233333333333334,
"alnum_prop": 0.6404066073697586,
"repo_name": "ilblackdragon/django-blogs",
"id": "f52609e726760e9265f0c7056da81fe6b3fae5a2",
"size": "806",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "blog/admin.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "5214"
},
{
"name": "Makefile",
"bytes": "206"
},
{
"name": "Python",
"bytes": "182264"
}
],
"symlink_target": ""
}
|
"""
Module to update migration scenario file migrate.yaml, during execution,
point to insert fail step ('fail_migration') is being chosen randomly by
_insert_break_point method from list of steps generated by _get_list_of_tasks
method.
"""
import os
import random
import yaml
from filtering_utils import FilteringUtils
class RollbackScenarioGeneration(object):
    """Inserts a 'fail_migration' step at a random point in migrate.yaml.

    The scenario file is parsed, its leaf task steps are collected, and the
    failure marker is spliced in immediately after a randomly chosen step.
    Written for Python 2 (iteritems, dict.values() indexing).
    """
    def __init__(self):
        self.migration_utils = FilteringUtils()
        # Path of the scenario file, relative to the project root.
        self.file_path = 'devlab/tests/migrate.yaml'
        self.main_folder = self.migration_utils.main_folder
        self.full_path = os.path.join(self.main_folder, self.file_path)
        # The step that is injected to force a migration failure.
        self.exception_task = {'fail_migration': True}
        # Accumulator used by _get_list_of_tasks across recursive calls.
        self.steps_list = []
    def _read_migrationation_file(self):
        # NOTE(review): method name contains a typo ('migrationation');
        # kept as-is since it is called by name below.
        migration_data = self.migration_utils.load_file(self.file_path)[0]
        return migration_data
    @staticmethod
    def _dump_into_file(file_path, data):
        """Serialize ``data`` back to YAML at ``file_path``."""
        with open(file_path, "w") as f:
            yaml.dump(data, f, default_flow_style=False)
    @staticmethod
    def _verification(_step):
        """Return True when ``_step`` looks like a leaf task step.

        A leaf is a single-key dict whose value is a bool or a one-element
        list (Python 2: dict.values() returns an indexable list).
        """
        if isinstance(_step, dict):
            if isinstance(_step.values()[0], bool) or \
                    isinstance(_step.values()[0], list) and \
                    len(_step.values()[0]) == 1:
                return True
    def _get_list_of_tasks(self, search_dict):
        """Recursively collect every leaf task step into self.steps_list."""
        for key, value in search_dict.iteritems():
            if self._verification(search_dict):
                self.steps_list.append(search_dict)
            elif isinstance(value, list):
                for item in value:
                    self._get_list_of_tasks(item)
            elif isinstance(value, dict):
                self._get_list_of_tasks(value)
        return self.steps_list
    def _insert_break_point(self, search_dict, field):
        """Insert the exception task right after ``field`` wherever it occurs."""
        for key, value in search_dict.iteritems():
            if isinstance(value, dict):
                if self._verification(value):
                    return
                else:
                    self._insert_break_point(value, field)
            elif isinstance(value, list):
                if field in value:
                    # Splice the failure step immediately after the target.
                    index = value.index(field) + 1
                    value.insert(index, self.exception_task)
                    return
                else:
                    for item in value:
                        if isinstance(item, dict):
                            self._insert_break_point(item, field)
    def _find_break_point(self, search_dict, field):
        """Return all values stored under key ``field`` anywhere in the tree."""
        fields_found = []
        for key, value in search_dict.iteritems():
            if key == field:
                fields_found.append(value)
            elif isinstance(value, dict):
                results = self._find_break_point(value, field)
                for result in results:
                    fields_found.append(result)
            elif isinstance(value, list):
                for item in value:
                    if isinstance(item, dict):
                        more_results = self._find_break_point(item, field)
                        for another_result in more_results:
                            fields_found.append(another_result)
        return fields_found
    def generate_exception_task_in_random_point(self):
        """Pick a random step, insert the failure step after it, and rewrite
        the scenario file after verifying exactly one break point exists."""
        migration_data = self._read_migrationation_file()
        data = None
        # Only steps under the top-level 'process' key are candidates.
        for key, value in migration_data.iteritems():
            if key == 'process':
                data = {key: value}
        list_of_steps = self._get_list_of_tasks(data)
        random_step = random.choice(list_of_steps)
        self._insert_break_point(data, random_step)
        print('\n\nBreak point was set after:\n{}, index: {}\n\n'.format(
            random_step, list_of_steps.index(random_step)))
        try:
            # Sanity check: the injected marker must appear exactly once
            # with the expected value before the file is overwritten.
            assert(self._find_break_point(migration_data,
                                          self.exception_task.keys()[0])
                   == self.exception_task.values())
            self._dump_into_file(self.full_path, migration_data)
        except Exception as e:
            print('Integration of failure step into migration scenario failed '
                  'with following error: \n\n{}'.format(e))
# Script entry point: mutate migrate.yaml in place with one random failure step.
if __name__ == '__main__':
    rollback = RollbackScenarioGeneration()
    rollback.generate_exception_task_in_random_point()
|
{
"content_hash": "b702e53d32001f8cccc92a3d37caf361",
"timestamp": "",
"source": "github",
"line_count": 108,
"max_line_length": 79,
"avg_line_length": 39.675925925925924,
"alnum_prop": 0.5628938156359393,
"repo_name": "Settis/CloudFerry",
"id": "bcfdb98e1cbbb42c4b7594dc26a8c36c653a5c28",
"size": "4860",
"binary": false,
"copies": "1",
"ref": "refs/heads/devel",
"path": "devlab/tests/rollback_scenario_generation.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "362999"
}
],
"symlink_target": ""
}
|
import abc
import six
from cdn.provider.base import controller
from cdn.provider.base import responder
@six.add_metaclass(abc.ABCMeta)
class ServicesControllerBase(controller.ProviderControllerBase):
def __init__(self, driver):
super(ServicesControllerBase, self).__init__(driver)
self.responder = responder.Responder(driver.provider_name)
@abc.abstractmethod
def update(self, service_name, service_json):
raise NotImplementedError
@abc.abstractmethod
def create(self, service_name, service_json):
raise NotImplementedError
@abc.abstractmethod
def delete(self, service_name):
raise NotImplementedError
|
{
"content_hash": "d8b1d7cb25408d7785ac4cf8ccaf835e",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 66,
"avg_line_length": 26.076923076923077,
"alnum_prop": 0.7300884955752213,
"repo_name": "obulpathi/cdn1",
"id": "482c839e38f2fca203f3d6c282fa6a3d5adef192",
"size": "1263",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cdn/provider/base/services.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "277977"
},
{
"name": "Shell",
"bytes": "4049"
}
],
"symlink_target": ""
}
|
"""A experimental script for setting up DVS recording with VOT dataset.
This script uses Sacred from IDSIA lab to setup the experiment.
This allows me to configure experiment by JSON file.
Author: Yuhuang Hu
Email : duugyue100@gmail.com
"""
from sacred import Experiment
import os
import cPickle as pickle
import numpy as np
import cv2
from spikefuel import tools, gui, helpers
# Sacred experiment definition; values below are defaults that a JSON
# config file supplied at run time is expected to override.
exp = Experiment("DVS Recording - VOT")
exp.add_config({
    "vot_dir": "",
    "vot_stats_path": "",
    "recording_save_path": "",
    "viewer_id": 1,
    "screen_height": 0,
    "screen_width": 0,
    "work_win_scale": 0.9,
    "bg_color": [255, 0, 0],
    "fps": 0
})
@exp.automain
def dvs_vot_exp(vot_dir,
                vot_stats_path,
                recording_save_path,
                viewer_id,
                screen_height,
                screen_width,
                work_win_scale,
                bg_color,
                fps):
    """Setup an experiment for VOT dataset.
    Parameters
    ----------
    vot_dir : string
        absolute path of VOT dataset
        e.g. /home/user/vot2015
    vot_stats_path : string
        path to vot dataset stats
    recording_save_path : string
        path to logged recording data
    viewer_id : int
        the ID of jAER viewer, for Linux is 1, Mac OS X is 2
    screen_height : int
        height of the screen in pixel
    screen_width : int
        width of the screen in pixel
    work_win_scale : float
        the scaling factor that calculates working window size
    bg_color : list
        background color definition
    fps : int
        frame per second while displaying the video,
        will round to closest number
    """
    if not os.path.exists(str(recording_save_path)):
        os.mkdir(str(recording_save_path))
    # Load VOT stats (Python 2: built-in file() and cPickle)
    f = file(vot_stats_path, mode="r")
    vot_stats = pickle.load(f)
    f.close()
    vot_list = vot_stats['vot_list']
    num_frames = vot_stats['num_frames']
    # Load groundtruth and image lists
    print "[MESSAGE] Loading image lists."
    lists = []
    for i in xrange(len(num_frames)):
        list_path = os.path.join(vot_dir, vot_list[i])
        temp_list = tools.create_vot_image_list(list_path, num_frames[i])
        lists.append(temp_list)
    print "[MESSAGE] Ground truths and image lists are loaded."
    # Create full background
    background = (np.ones((screen_height,
                           screen_width, 3))*bg_color).astype(np.uint8)
    # Setup OpenCV display window
    window_title = "DVS-VOT-EXP"
    cv2.namedWindow(window_title, cv2.WND_PROP_FULLSCREEN)
    # Experiment setup calibration
    # Not without tuning images
    swin_h, swin_w = helpers.calibration(win_h=screen_height,
                                         win_w=screen_width,
                                         scale=work_win_scale,
                                         window_title=window_title,
                                         bg_color=bg_color)
    # Init a general UDP socket used to control the jAER DVS logger.
    s = tools.init_dvs()
    tools.reset_dvs_time(s)
    for k in xrange(len(num_frames)):
        print "[MESSAGE] Display video sequence %i" % (k+1)
        frames = []
        for i in xrange(num_frames[k]):
            frames.append(cv2.imread(lists[k][i]))
        new_frames = gui.rescale_image_sequence(frames, swin_h, swin_w,
                                                bg_color)
        new_frames = gui.create_border_sequence(new_frames,
                                                screen_height, screen_width,
                                                bg_color)
        cv2.imshow(window_title, new_frames[0])
        # Hold the first frame for 2 s so the DVS sensor adapts to the scene.
        print "[MESSAGE] Adapting video sequence %i" % (k+1)
        cv2.waitKey(delay=2000)
        tools.start_log_dvs(s, recording_save_path, vot_list[k], viewer_id)
        for i in xrange(num_frames[k]):
            cv2.imshow(window_title, new_frames[i])
            # ESC aborts the whole experiment.
            key = cv2.waitKey(delay=int(1000/fps)) & 0xFF
            if key == 27:
                cv2.destroyAllWindows()
                quit()
        cv2.imshow(window_title, new_frames[-1])
        tools.stop_log_dvs(s, viewer_id)
        print "[MESSAGE] Releasing video sequence %i" % (k+1)
        cv2.waitKey(delay=2000)
        cv2.imshow(window_title, background)
        cv2.waitKey(delay=1000)
        tools.reset_dvs_time(s)
        print "[MESSAGE] Video sequence %i is logged." % (k+1)
    # Destory both scoket and opencv window
    tools.destroy_dvs(s)
    cv2.destroyAllWindows()
|
{
"content_hash": "44d87ee15466ebcf768efef7fd579b20",
"timestamp": "",
"source": "github",
"line_count": 138,
"max_line_length": 76,
"avg_line_length": 32.731884057971016,
"alnum_prop": 0.5711755589993358,
"repo_name": "duguyue100/spikefuel",
"id": "01a6784163d1fdaa6ef7a992c6ea4571786e33ab",
"size": "4517",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scripts/dvs_vot_exp.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "187313"
}
],
"symlink_target": ""
}
|
import collections
import hashlib
from django import forms
from django.conf import settings
from django.db import models
from django.db.models import Max
from django.db.models.signals import post_save
from django.dispatch import receiver
from django.template.defaultfilters import slugify
from django.utils import timezone
from django.utils.translation import ugettext_lazy as _
from account.models import SignupCode, SignupCodeResult
from account.signals import user_signed_up
from . import trello
# Swappable user model reference and the secret used to key survey codes
# (falls back to SECRET_KEY when no dedicated survey secret is configured).
User = getattr(settings, 'AUTH_USER_MODEL', 'auth.User')
SURVEY_SECRET = getattr(settings, "WAITINGLIST_SURVEY_SECRET", settings.SECRET_KEY)
class WaitingListEntry(models.Model):
    """A single email address on the waiting list."""
    email = models.EmailField(_("email address"), unique=True)
    created = models.DateTimeField(_("created"), default=timezone.now, editable=False)
    # Id of the Trello card mirroring this entry; empty when none exists.
    trello_card_id = models.CharField(max_length=100, blank=True)
    initial_contact_sent = models.BooleanField(default=False)
    def reset_trello_link(self):
        """Delete the linked Trello card (if any) and clear the stored id."""
        if self.trello_card_id:
            api = trello.Api()
            api.delete_card(self.trello_card_id)
            self.trello_card_id = ""
            self.save()
    class Meta:
        verbose_name = _("waiting list entry")
        verbose_name_plural = _("waiting list entries")
    def __unicode__(self):
        return self.email
@receiver(post_save, sender=WaitingListEntry)
def handle_waitinglistentry_save(sender, **kwargs):
    """On creation of an entry, attach an instance of the active survey.

    Does nothing when no survey is currently marked active.
    """
    if kwargs.get("created"):
        try:
            survey = Survey.objects.get(active=True)
            SurveyInstance.objects.create(
                survey=survey,
                entry=kwargs.get("instance")
            )
        except Survey.DoesNotExist:
            pass
class Survey(models.Model):
    """A survey; at most one survey is active at a time (enforced in save)."""
    label = models.CharField(max_length=100, unique=True)
    active = models.BooleanField(default=True)
    def __unicode__(self):
        return self.label
    def save(self, *args, **kwargs):
        # Activating this survey deactivates every other active survey,
        # preserving the single-active-survey invariant.
        if self.active:
            Survey.objects.filter(active=True).update(active=False)
        return super(Survey, self).save(*args, **kwargs)
class SurveyInstance(models.Model):
    """Links one waiting-list entry to one survey via an opaque code."""
    survey = models.ForeignKey(Survey, related_name="instances")
    entry = models.OneToOneField(WaitingListEntry)
    code = models.CharField(max_length=200, unique=True)
    def generate_hash(self):
        # Opaque per-entry token derived from the email and the site secret.
        # NOTE(review): md5 is weak; acceptable only as an unguessable id,
        # not for anything security-critical.
        return hashlib.md5(self.entry.email + SURVEY_SECRET).hexdigest()
    def save(self, *args, **kwargs):
        # The code is recomputed on every save; it is deterministic per email.
        self.code = self.generate_hash()
        return super(SurveyInstance, self).save(*args, **kwargs)
class SurveyQuestion(models.Model):
    """One question in a survey, rendered as a Django form field via form_field()."""
    # Widget kinds for the rendered form field.
    TEXT_FIELD = 0
    TEXT_AREA = 1
    RADIO_CHOICES = 2
    CHECKBOX_FIELD = 3
    BOOLEAN_FIELD = 4
    FIELD_TYPE_CHOICES = [
        (TEXT_FIELD, "text field"),
        (TEXT_AREA, "textarea"),
        (RADIO_CHOICES, "radio choices"),
        (CHECKBOX_FIELD, "checkbox field (can select multiple answers"),
        (BOOLEAN_FIELD, "boolean field")
    ]
    survey = models.ForeignKey(Survey, related_name="questions")
    question = models.TextField()
    kind = models.IntegerField(choices=FIELD_TYPE_CHOICES)
    help_text = models.TextField(blank=True)
    # Display position within the survey; auto-assigned in save().
    ordinal = models.IntegerField(blank=True)
    required = models.BooleanField(default=False)
    trello_list_id = models.CharField(max_length=100, blank=True)
    class Meta:
        unique_together = [
            ("survey", "question")
        ]
        ordering = ["ordinal"]
    @property
    def name(self):
        """Slug of the question text, used as the form field name."""
        return slugify(self.question)
    def form_field(self):
        """Build the Django form field matching this question's kind."""
        kwargs = dict(
            label=self.question,
            help_text=self.help_text,
            required=self.required
        )
        # Default: plain text input; the branches below override per kind.
        field_class = forms.CharField
        if self.kind == SurveyQuestion.TEXT_AREA:
            kwargs.update({"widget": forms.Textarea()})
        elif self.kind == SurveyQuestion.RADIO_CHOICES:
            field_class = forms.ModelChoiceField
            kwargs.update({"widget": forms.RadioSelect(), "queryset": self.choices.all()})
        elif self.kind == SurveyQuestion.CHECKBOX_FIELD:
            field_class = forms.ModelMultipleChoiceField
            kwargs.update({"widget": forms.CheckboxSelectMultiple(),
                           "queryset": self.choices.all()})
        elif self.kind == SurveyQuestion.BOOLEAN_FIELD:
            field_class = forms.BooleanField
        return field_class(**kwargs)
    def save(self, *args, **kwargs):
        # On first save, append the question at the end of the survey's order.
        if not self.pk:
            max_ordinal = self.survey.questions.aggregate(
                Max("ordinal")
            )["ordinal__max"] or 0
            self.ordinal = max_ordinal + 1
        return super(SurveyQuestion, self).save(*args, **kwargs)
class SurveyQuestionChoice(models.Model):
    """One selectable answer for a radio/checkbox SurveyQuestion."""
    question = models.ForeignKey(SurveyQuestion, related_name="choices")
    label = models.CharField(max_length=100)
    def __unicode__(self):
        return self.label
class SurveyAnswer(models.Model):
    """A user's answer to one question of one survey instance."""
    instance = models.ForeignKey(SurveyInstance, related_name="answers")
    question = models.ForeignKey(SurveyQuestion, related_name="answers")
    # Free-text/choice answers go in `value`; boolean questions use
    # `value_boolean` instead.
    value = models.TextField(blank=True)
    value_boolean = models.NullBooleanField(blank=True)
    created = models.DateTimeField(_("created"), default=timezone.now, editable=False)
    trello_card_id = models.CharField(max_length=100, blank=True)
# Lightweight read-only view of one cohort member; built by Cohort.members().
Member = collections.namedtuple("Member", ["email", "signup_code", "user", "invited"])
class Cohort(models.Model):
    """A named batch of signup codes that get invited together."""
    name = models.CharField(_("name"), max_length=35)
    created = models.DateTimeField(_("created"), default=timezone.now, editable=False)
    class Meta:
        permissions = (
            ("manage_cohorts", "Can manage cohorts"),
        )
    def members(self):
        """Return a Member tuple for every signup code attached to this cohort.

        ``user`` is None until the code has been redeemed (no SignupCodeResult
        yet); ``invited`` reflects whether the code email was sent.
        """
        members = []
        for scc in self.signupcodecohort_set.select_related():
            try:
                scr = SignupCodeResult.objects.get(signup_code=scc.signup_code_id)
            except SignupCodeResult.DoesNotExist:
                user = None
            else:
                user = scr.user
            members.append(
                Member(
                    scc.signup_code.email,
                    scc.signup_code,
                    user,
                    bool(scc.signup_code.sent)
                )
            )
        return members
    def member_counts(self):
        """Return totals: all members, redeemed users, and not-yet-invited."""
        members = self.members()
        return {
            "total": len(members),
            "users": len([m for m in members if m.user is not None]),
            "pending": len([m.signup_code for m in members if not m.invited]),
        }
    def send_invitations(self):
        """Email the signup code to every member that has not been invited yet."""
        for sc in [m.signup_code for m in self.members() if not m.invited]:
            sc.send()
    def __unicode__(self):
        return self.name
class SignupCodeCohort(models.Model):
    """
    Associates a signup code with its cohort.
    fetch cohort of a given signup code
    SignupCodeCohort.objects.select_related("cohort").get(signup_code__code="abc").cohort
    list of people waiting NOT on the site already or invited
    WaitingListEntry.objects.exclude(email__in=SignupCode.objects.values("email")).exclude(email__in=User.objects.values("email"))
    """
    signup_code = models.OneToOneField(SignupCode)
    cohort = models.ForeignKey(Cohort)
class UserCohort(models.Model):
    """
    Upon signup we create an instance of this model associating the new user and their cohort
    """
    user = models.OneToOneField(User)
    cohort = models.ForeignKey(Cohort)
@receiver(user_signed_up)
def handle_user_signup(sender, **kwargs):
    """Record the new user's cohort based on the signup code they used.

    Signups whose code has no cohort association are silently ignored.
    """
    signup_code = kwargs["form"].cleaned_data["code"]
    # fetch the cohort for the signup code
    qs = SignupCodeCohort.objects.select_related("cohort")
    try:
        cohort = qs.get(signup_code__code=signup_code).cohort
        # create a UserCohort for user association to a cohort
        UserCohort.objects.create(user=kwargs["user"], cohort=cohort)
    except SignupCodeCohort.DoesNotExist:
        pass
|
{
"content_hash": "47e6090ae79759216fb8ed5a68398f4f",
"timestamp": "",
"source": "github",
"line_count": 249,
"max_line_length": 134,
"avg_line_length": 31.97991967871486,
"alnum_prop": 0.6389551676503831,
"repo_name": "pinax/django-waitinglist",
"id": "c4b488ef0875f10aa282906bc979336805a71591",
"size": "7963",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "waitinglist/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "482"
},
{
"name": "Python",
"bytes": "38700"
}
],
"symlink_target": ""
}
|
from airflow import DAG
from airflow.providers.plexus.operators.job import PlexusJobOperator
from airflow.utils.dates import days_ago
# Working directory on the Plexus compute node and the shell command run
# there before the job starts.
HOME = '/home/acc'
T3_PRERUN_SCRIPT = 'cp {home}/imdb/run_scripts/mlflow.sh {home}/ && chmod +x mlflow.sh'.format(home=HOME)
# One-shot test DAG exercising the PlexusJobOperator.
dag = DAG(
    'test',
    default_args={'owner': 'core scientific', 'retries': 1},
    description='testing plexus operator',
    start_date=days_ago(1),
    schedule_interval='@once',
    catchup=False,
)
t1 = PlexusJobOperator(
    task_id='test',
    job_params={
        'name': 'test',
        'app': 'MLFlow Pipeline 01',
        'queue': 'DGX-2 (gpu:Tesla V100-SXM3-32GB)',
        'num_nodes': 1,
        'num_cores': 1,
        'prerun_script': T3_PRERUN_SCRIPT,
    },
    dag=dag,
)
# NOTE(review): bare expression has no effect; the task is already attached
# to the DAG via the dag= argument above.
t1
|
{
"content_hash": "e2f2a86d6b5b8a09d89b6144fd40c733",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 105,
"avg_line_length": 24.580645161290324,
"alnum_prop": 0.6233595800524935,
"repo_name": "dhuang/incubator-airflow",
"id": "2f0a492a671228c60888504c3d2e68215e651303",
"size": "1548",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "airflow/providers/plexus/example_dags/example_plexus.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "109698"
},
{
"name": "HTML",
"bytes": "264851"
},
{
"name": "JavaScript",
"bytes": "1988427"
},
{
"name": "Mako",
"bytes": "1037"
},
{
"name": "Python",
"bytes": "3357958"
},
{
"name": "Shell",
"bytes": "34442"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Adds optional free-text `comment` and `sources` fields to IndicatorScore."""
    dependencies = [
        ('scorecard', '0006_auto_20150602_1821'),
    ]
    operations = [
        migrations.AddField(
            model_name='indicatorscore',
            name='comment',
            field=models.TextField(blank=True),
        ),
        migrations.AddField(
            model_name='indicatorscore',
            name='sources',
            field=models.TextField(blank=True),
        ),
    ]
|
{
"content_hash": "9e2c755ae7660c67f6826d74c4d87d48",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 49,
"avg_line_length": 23.52173913043478,
"alnum_prop": 0.5748613678373382,
"repo_name": "sunlightlabs/mptindicators",
"id": "ceb4b653927e886d5bd57668f36d1284ed4c3df3",
"size": "565",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mptindicators/scorecard/migrations/0007_auto_20150602_1915.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "90996"
},
{
"name": "HTML",
"bytes": "61277"
},
{
"name": "JavaScript",
"bytes": "4057"
},
{
"name": "Python",
"bytes": "49529"
}
],
"symlink_target": ""
}
|
"""The nexia integration base entity."""
from aiopvapi.resources.shade import ATTR_TYPE
import homeassistant.helpers.device_registry as dr
from homeassistant.helpers.update_coordinator import CoordinatorEntity
from .const import (
DEVICE_FIRMWARE,
DEVICE_MAC_ADDRESS,
DEVICE_MODEL,
DEVICE_NAME,
DEVICE_SERIAL_NUMBER,
DOMAIN,
FIRMWARE_BUILD,
FIRMWARE_IN_SHADE,
FIRMWARE_REVISION,
FIRMWARE_SUB_REVISION,
MANUFACTURER,
)
class HDEntity(CoordinatorEntity):
    """Base class for hunter douglas entities."""
    def __init__(self, coordinator, device_info, room_name, unique_id):
        """Initialize the entity."""
        super().__init__(coordinator)
        self._room_name = room_name
        self._unique_id = unique_id
        # Hub device payload dict (serial, MAC, model, firmware, ...).
        self._device_info = device_info
    @property
    def unique_id(self):
        """Return the unique id."""
        return self._unique_id
    @property
    def device_info(self):
        """Return the device_info of the device."""
        firmware = self._device_info[DEVICE_FIRMWARE]
        # Firmware version string is assembled as revision.subrevision.build.
        sw_version = f"{firmware[FIRMWARE_REVISION]}.{firmware[FIRMWARE_SUB_REVISION]}.{firmware[FIRMWARE_BUILD]}"
        return {
            "identifiers": {(DOMAIN, self._device_info[DEVICE_SERIAL_NUMBER])},
            "connections": {
                (dr.CONNECTION_NETWORK_MAC, self._device_info[DEVICE_MAC_ADDRESS])
            },
            "name": self._device_info[DEVICE_NAME],
            "suggested_area": self._room_name,
            "model": self._device_info[DEVICE_MODEL],
            "sw_version": sw_version,
            "manufacturer": MANUFACTURER,
        }
class ShadeEntity(HDEntity):
    """Base class for hunter douglas shade entities."""
    def __init__(self, coordinator, device_info, room_name, shade, shade_name):
        """Initialize the shade."""
        super().__init__(coordinator, device_info, room_name, shade.id)
        self._shade_name = shade_name
        self._shade = shade
    @property
    def device_info(self):
        """Return the device_info of the device."""
        device_info = {
            "identifiers": {(DOMAIN, self._shade.id)},
            "name": self._shade_name,
            "suggested_area": self._room_name,
            "manufacturer": MANUFACTURER,
            # Shades are reached through the PowerView hub device.
            "via_device": (DOMAIN, self._device_info[DEVICE_SERIAL_NUMBER]),
        }
        # Older shades may not report their own firmware; return without it.
        if FIRMWARE_IN_SHADE not in self._shade.raw_data:
            return device_info
        firmware = self._shade.raw_data[FIRMWARE_IN_SHADE]
        sw_version = f"{firmware[FIRMWARE_REVISION]}.{firmware[FIRMWARE_SUB_REVISION]}.{firmware[FIRMWARE_BUILD]}"
        # Translate the numeric shade type into its human-readable description.
        model = self._shade.raw_data[ATTR_TYPE]
        for shade in self._shade.shade_types:
            if shade.shade_type == model:
                model = shade.description
                break
        device_info["sw_version"] = sw_version
        device_info["model"] = model
        return device_info
|
{
"content_hash": "7b0fb5355cebfaa9ae56ef5e80665ff3",
"timestamp": "",
"source": "github",
"line_count": 90,
"max_line_length": 114,
"avg_line_length": 32.84444444444444,
"alnum_prop": 0.6072395128552097,
"repo_name": "adrienbrault/home-assistant",
"id": "679e55e806c79761750ced997dafdc7ae7fcb18f",
"size": "2956",
"binary": false,
"copies": "2",
"ref": "refs/heads/dev",
"path": "homeassistant/components/hunterdouglas_powerview/entity.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1795"
},
{
"name": "Python",
"bytes": "32021043"
},
{
"name": "Shell",
"bytes": "4900"
}
],
"symlink_target": ""
}
|
"""Support for fans through the SmartThings cloud API."""
from typing import Optional, Sequence
from pysmartthings import Capability
from homeassistant.components.fan import (
SPEED_HIGH,
SPEED_LOW,
SPEED_MEDIUM,
SPEED_OFF,
SUPPORT_SET_SPEED,
FanEntity,
)
from . import SmartThingsEntity
from .const import DATA_BROKERS, DOMAIN
# Map the integer fan_speed reported by SmartThings (0-3) to HA speed names,
# and the reverse mapping for issuing speed commands.
VALUE_TO_SPEED = {0: SPEED_OFF, 1: SPEED_LOW, 2: SPEED_MEDIUM, 3: SPEED_HIGH}
SPEED_TO_VALUE = {v: k for k, v in VALUE_TO_SPEED.items()}
async def async_setup_entry(hass, config_entry, async_add_entities):
    """Add fans for a config entry."""
    broker = hass.data[DOMAIN][DATA_BROKERS][config_entry.entry_id]
    # Only create fan entities for devices the broker assigned to this platform.
    async_add_entities(
        [
            SmartThingsFan(device)
            for device in broker.devices.values()
            if broker.any_assigned(device.device_id, "fan")
        ]
    )
def get_capabilities(capabilities: Sequence[str]) -> Optional[Sequence[str]]:
    """Return the supported capabilities when the required minimum is present.

    A fan device must expose both the switch and fan_speed capabilities;
    if either one is missing, None is returned and the device is not claimed.
    """
    required = [Capability.switch, Capability.fan_speed]
    for capability in required:
        if capability not in capabilities:
            return None
    return required
class SmartThingsFan(SmartThingsEntity, FanEntity):
    """Define a SmartThings Fan."""
    async def async_set_speed(self, speed: str):
        """Set the speed of the fan."""
        # Translate the HA speed name into the SmartThings 0-3 value.
        value = SPEED_TO_VALUE[speed]
        await self._device.set_fan_speed(value, set_status=True)
        # State is set optimistically in the command above, therefore update
        # the entity state ahead of receiving the confirming push updates
        self.async_schedule_update_ha_state()
    async def async_turn_on(self, speed: str = None, **kwargs) -> None:
        """Turn the fan on."""
        if speed is not None:
            # Turning on with an explicit speed sets that speed directly.
            value = SPEED_TO_VALUE[speed]
            await self._device.set_fan_speed(value, set_status=True)
        else:
            # No speed requested; just switch the device on.
            await self._device.switch_on(set_status=True)
        # State is set optimistically in the commands above, therefore update
        # the entity state ahead of receiving the confirming push updates
        self.async_schedule_update_ha_state()
    async def async_turn_off(self, **kwargs) -> None:
        """Turn the fan off."""
        await self._device.switch_off(set_status=True)
        # State is set optimistically in the command above, therefore update
        # the entity state ahead of receiving the confirming push updates
        self.async_schedule_update_ha_state()
    @property
    def is_on(self) -> bool:
        """Return true if fan is on."""
        return self._device.status.switch
    @property
    def speed(self) -> str:
        """Return the current speed."""
        # The device reports an integer 0-3; map back to the HA speed name.
        return VALUE_TO_SPEED[self._device.status.fan_speed]
    @property
    def speed_list(self) -> list:
        """Get the list of available speeds."""
        return [SPEED_OFF, SPEED_LOW, SPEED_MEDIUM, SPEED_HIGH]
    @property
    def supported_features(self) -> int:
        """Flag supported features."""
        return SUPPORT_SET_SPEED
|
{
"content_hash": "69321734f80e0d74dc5c90f8fd88fe65",
"timestamp": "",
"source": "github",
"line_count": 89,
"max_line_length": 77,
"avg_line_length": 34.87640449438202,
"alnum_prop": 0.6543170103092784,
"repo_name": "Teagan42/home-assistant",
"id": "aad62aed486cf9c9013ea51fc293591ad9508e9d",
"size": "3104",
"binary": false,
"copies": "2",
"ref": "refs/heads/dev",
"path": "homeassistant/components/smartthings/fan.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "19774313"
},
{
"name": "Shell",
"bytes": "6846"
}
],
"symlink_target": ""
}
|
from distutils.version import LooseVersion
import pytest
from pandas.compat._optional import VERSIONS
import pandas as pd
from pandas.core.computation.engines import _engines
import pandas.core.computation.expr as expr
def test_compat():
    # Verify the _NUMEXPR_INSTALLED flag agrees with the installed numexpr:
    # it must be False when numexpr is older than the minimum version.
    from pandas.core.computation.check import _NUMEXPR_INSTALLED
    try:
        import numexpr as ne
    except ImportError:
        pytest.skip("not testing numexpr version compat")
    else:
        minimum = LooseVersion(VERSIONS["numexpr"])
        if LooseVersion(ne.__version__) < minimum:
            assert not _NUMEXPR_INSTALLED
        else:
            assert _NUMEXPR_INSTALLED
@pytest.mark.parametrize("engine", _engines)
@pytest.mark.parametrize("parser", expr._parsers)
def test_invalid_numexpr_version(engine, parser):
    # pd.eval must evaluate correctly for every engine/parser pair; with a
    # too-old numexpr the numexpr engine is expected to raise ImportError.
    def check_eval():
        a, b = 1, 2  # noqa
        res = pd.eval("a + b", engine=engine, parser=parser)
        assert res == 3

    if engine != "numexpr":
        check_eval()
        return
    try:
        import numexpr as ne
    except ImportError:
        pytest.skip("no numexpr")
    if LooseVersion(ne.__version__) < LooseVersion(VERSIONS["numexpr"]):
        with pytest.raises(ImportError):
            check_eval()
    else:
        check_eval()
|
{
"content_hash": "7af7473f50359a9073f542c71c044480",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 80,
"avg_line_length": 27.163265306122447,
"alnum_prop": 0.6168294515401953,
"repo_name": "kushalbhola/MyStuff",
"id": "b3fbd8c17d8bfa9c7a1397b928962d70bf7a7a7f",
"size": "1331",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "Practice/PythonApplication/env/Lib/site-packages/pandas/tests/computation/test_compat.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1330"
},
{
"name": "C#",
"bytes": "332967"
},
{
"name": "CSS",
"bytes": "1451"
},
{
"name": "HTML",
"bytes": "7539"
},
{
"name": "Java",
"bytes": "14860"
},
{
"name": "JavaScript",
"bytes": "9843"
},
{
"name": "Jupyter Notebook",
"bytes": "374013"
},
{
"name": "PowerShell",
"bytes": "1448"
},
{
"name": "Python",
"bytes": "6511820"
},
{
"name": "Tcl",
"bytes": "24289"
},
{
"name": "TypeScript",
"bytes": "15697"
}
],
"symlink_target": ""
}
|
""" Various kinds of icon widgets.
"""
from __future__ import absolute_import
from ...core.properties import abstract
from ...core.properties import Bool, Float, Enum
from ...core.enums import NamedIcon
from .widget import Widget
# Base class only; concrete icons (e.g. ``Icon`` below) supply the model.
@abstract
class AbstractIcon(Widget):
    """ An abstract base class for icon widgets. ``AbstractIcon``
    is not generally useful to instantiate on its own.
    """
class Icon(AbstractIcon):
    """ A "stock" icon based on FontAwesome.
    """
    icon_name = Enum(NamedIcon, default="check", help="""
    What icon to use. See http://fortawesome.github.io/Font-Awesome/icons/
    for the list of available icons.
    """)
    size = Float(None, help="""
    The size multiplier (1x, 2x, ..., 5x).
    """)
    # NOTE(review): default=None with Enum("horizontal", "vertical") relies on
    # the property accepting None as "do not flip" -- confirm this is the
    # intended out-of-enum default.
    flip = Enum("horizontal", "vertical", default=None, help="""
    Optionally flip the icon horizontally or vertically.
    """)
    spin = Bool(False, help="""
    Indicates a spinning (animated) icon. This value is ignored for
    icons that do not support spinning.
    """)
|
{
"content_hash": "d35ffbc3b8f88fd335be955b93acf564",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 74,
"avg_line_length": 26.384615384615383,
"alnum_prop": 0.6618075801749271,
"repo_name": "ptitjano/bokeh",
"id": "19e411eda29785740da6554f61edfcb6f1e0803f",
"size": "1029",
"binary": false,
"copies": "8",
"ref": "refs/heads/master",
"path": "bokeh/models/widgets/icons.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "1710"
},
{
"name": "CSS",
"bytes": "406989"
},
{
"name": "CoffeeScript",
"bytes": "1073573"
},
{
"name": "HTML",
"bytes": "45510"
},
{
"name": "JavaScript",
"bytes": "12173"
},
{
"name": "Jupyter Notebook",
"bytes": "3981"
},
{
"name": "Makefile",
"bytes": "1161"
},
{
"name": "Python",
"bytes": "2083050"
},
{
"name": "Shell",
"bytes": "15584"
},
{
"name": "TypeScript",
"bytes": "25843"
}
],
"symlink_target": ""
}
|
import argparse
import os
class Config(object):
    """Parse command-line options and make sure the config directory exists.

    ``parse()`` returns the argparse Namespace with two helper callables
    (``join`` and ``save_game_full_path``) attached for convenience.
    """
    def __init__(self):
        # Populated by parse(); helpers below read from it.
        self.config = None
    def parse(self):
        """Parse sys.argv, attach helpers, create the config dir, and return
        the resulting Namespace."""
        parser = argparse.ArgumentParser()
        parser.add_argument(
            'command',
            help='Choose from setup, games, load, or upload.'
        )
        parser.add_argument(
            '-p', '--path',
            default='~/.gmr/',
            help='Path to config directory. Defaults to ~/.gmr/'
        )
        parser.add_argument(
            '-s', '--save-path',
            default='~/.local/share/Aspyr/Sid Meier\'s Civilization 5/'
                    'Saves/hotseat/',
            help='Directory containing Civilization 5 hotseat save games.'
        )
        parser.add_argument(
            '-f', '--save-file-name',
            default='(GMR) Play this one!.Civ5Save',
            help='File name of the hotseat save game.'
        )
        self.config = parser.parse_args()
        # Expose helper methods on the returned namespace for callers.
        self.config.join = self.join
        self.config.save_game_full_path = self.save_game_full_path
        self.ensure_directory()
        return self.config
    def ensure_directory(self):
        """Expand ``~`` in the config path and create the directory.

        Idempotent and race-free: instead of the previous exists-then-create
        check (which could fail if another process created the directory in
        between), attempt creation and only re-raise when the path genuinely
        is not a directory.
        """
        path = self.config.path = os.path.expanduser(self.config.path)
        try:
            os.makedirs(path)
        except OSError:
            if not os.path.isdir(path):
                raise
    # These are tacked onto the returned config object in parse()
    def join(self, name):
        """Return *name* joined under the config directory."""
        return os.path.join(self.config.path, name)
    def save_game_full_path(self):
        """Return the expanded full path to the hotseat save game file."""
        return os.path.expanduser(os.path.join(
            self.config.save_path,
            self.config.save_file_name,
        ))
|
{
"content_hash": "a911b19f575768d0bd699c0b82f8e68d",
"timestamp": "",
"source": "github",
"line_count": 55,
"max_line_length": 71,
"avg_line_length": 26.6,
"alnum_prop": 0.5468215994531784,
"repo_name": "gak/giant-multiplayer-robot-helper",
"id": "c66f098174681e2b43862ad57f07187e5bf9a98a",
"size": "1463",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "src/config.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "10845"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
try:
from urlparse import urlparse
except ImportError:
from urllib.parse import urlparse
import logging
from oauthlib.common import add_params_to_uri
from oauthlib.common import urldecode as _urldecode
from oauthlib.oauth1 import (
SIGNATURE_HMAC, SIGNATURE_RSA, SIGNATURE_TYPE_AUTH_HEADER
)
import requests
from . import OAuth1
import sys
# On Python 3 alias ``unicode`` to ``str`` for 2/3 compatibility.
# NOTE(review): ``sys.version > "3"`` is a lexicographic string comparison;
# it holds for CPython 2 vs 3 version strings but is fragile in general.
if sys.version > "3":
    unicode = str
# Module-level logger for this package.
log = logging.getLogger(__name__)
def urldecode(body):
    """Parse a form-encoded query string or a JSON body into a dict."""
    try:
        # Form-encoded is the standard OAuth1 token-response format.
        return _urldecode(body)
    except Exception:
        # Fix: previously a bare ``except:``, which also swallowed
        # SystemExit/KeyboardInterrupt; narrowed to Exception.
        # Some providers return JSON bodies instead of urlencoded ones.
        import json
        return json.loads(body)
class TokenRequestDenied(ValueError):
    """Raised when the provider rejects a token request.

    The HTTP status code of the failed request is kept on ``status_code``
    so callers can inspect why the request was denied.
    """
    def __init__(self, message, status_code):
        ValueError.__init__(self, message)
        self.status_code = status_code
class TokenMissing(ValueError):
    """Raised when a token response lacks the expected token fields.

    The offending response is kept on ``response`` for inspection.
    """
    def __init__(self, message, response):
        # Bug fix: this previously called super(TokenRequestDenied, self),
        # which raises TypeError because TokenMissing is not a subclass of
        # TokenRequestDenied; the intended class is TokenMissing itself.
        super(TokenMissing, self).__init__(message)
        self.response = response
class VerifierMissing(ValueError):
    """Raised when an access token is requested before a verifier is set."""
    pass
class OAuth1Session(requests.Session):
    """Request signing and convenience methods for the oauth dance.
    What is the difference between OAuth1Session and OAuth1?
    OAuth1Session actually uses OAuth1 internally and its purpose is to assist
    in the OAuth workflow through convenience methods to prepare authorization
    URLs and parse the various token and redirection responses. It also provides
    rudimentary validation of responses.
    An example of the OAuth workflow using a basic CLI app and Twitter.
    >>> # Credentials obtained during the registration.
    >>> client_key = 'client key'
    >>> client_secret = 'secret'
    >>> callback_uri = 'https://127.0.0.1/callback'
    >>>
    >>> # Endpoints found in the OAuth provider API documentation
    >>> request_token_url = 'https://api.twitter.com/oauth/request_token'
    >>> authorization_url = 'https://api.twitter.com/oauth/authorize'
    >>> access_token_url = 'https://api.twitter.com/oauth/access_token'
    >>>
    >>> oauth_session = OAuth1Session(client_key,client_secret=client_secret, callback_uri=callback_uri)
    >>>
    >>> # First step, fetch the request token.
    >>> oauth_session.fetch_request_token(request_token_url)
    {
        'oauth_token': 'kjerht2309u',
        'oauth_token_secret': 'lsdajfh923874',
    }
    >>>
    >>> # Second step. Follow this link and authorize
    >>> oauth_session.authorization_url(authorization_url)
    'https://api.twitter.com/oauth/authorize?oauth_token=sdf0o9823sjdfsdf&oauth_callback=https%3A%2F%2F127.0.0.1%2Fcallback'
    >>>
    >>> # Third step. Fetch the access token
    >>> redirect_response = raw_input('Paste the full redirect URL here.')
    >>> oauth_session.parse_authorization_response(redirect_response)
    {
        'oauth_token': 'kjerht2309u',
        'oauth_token_secret': 'lsdajfh923874',
        'oauth_verifier': 'w34o8967345',
    }
    >>> oauth_session.fetch_access_token(access_token_url)
    {
        'oauth_token': 'sdf0o9823sjdfsdf',
        'oauth_token_secret': '2kjshdfp92i34asdasd',
    }
    >>> # Done. You can now make OAuth requests.
    >>> status_url = 'http://api.twitter.com/1/statuses/update.json'
    >>> new_status = {'status': 'hello world!'}
    >>> oauth_session.post(status_url, data=new_status)
    <Response [200]>
    """
    def __init__(self, client_key,
                 client_secret=None,
                 resource_owner_key=None,
                 resource_owner_secret=None,
                 callback_uri=None,
                 signature_method=SIGNATURE_HMAC,
                 signature_type=SIGNATURE_TYPE_AUTH_HEADER,
                 rsa_key=None,
                 verifier=None,
                 client_class=None,
                 force_include_body=False,
                 **kwargs):
        """Construct the OAuth 1 session.
        :param client_key: A client specific identifier.
        :param client_secret: A client specific secret used to create HMAC and
                              plaintext signatures.
        :param resource_owner_key: A resource owner key, also referred to as
                                   request token or access token depending on
                                   when in the workflow it is used.
        :param resource_owner_secret: A resource owner secret obtained with
                                      either a request or access token. Often
                                      referred to as token secret.
        :param callback_uri: The URL the user is redirect back to after
                             authorization.
        :param signature_method: Signature methods determine how the OAuth
                                 signature is created. The three options are
                                 oauthlib.oauth1.SIGNATURE_HMAC (default),
                                 oauthlib.oauth1.SIGNATURE_RSA and
                                 oauthlib.oauth1.SIGNATURE_PLAIN.
        :param signature_type: Signature type decides where the OAuth
                               parameters are added. Either in the
                               Authorization header (default) or to the URL
                               query parameters or the request body. Defined as
                               oauthlib.oauth1.SIGNATURE_TYPE_AUTH_HEADER,
                               oauthlib.oauth1.SIGNATURE_TYPE_QUERY and
                               oauthlib.oauth1.SIGNATURE_TYPE_BODY
                               respectively.
        :param rsa_key: The private RSA key as a string. Can only be used with
                        signature_method=oauthlib.oauth1.SIGNATURE_RSA.
        :param verifier: A verifier string to prove authorization was granted.
        :param client_class: A subclass of `oauthlib.oauth1.Client` to use with
                             `requests_oauthlib.OAuth1` instead of the default
        :param force_include_body: Always include the request body in the
                                   signature creation.
        :param **kwargs: Additional keyword arguments passed to `OAuth1`
        """
        super(OAuth1Session, self).__init__()
        self._client = OAuth1(client_key,
                              client_secret=client_secret,
                              resource_owner_key=resource_owner_key,
                              resource_owner_secret=resource_owner_secret,
                              callback_uri=callback_uri,
                              signature_method=signature_method,
                              signature_type=signature_type,
                              rsa_key=rsa_key,
                              verifier=verifier,
                              client_class=client_class,
                              force_include_body=force_include_body,
                              **kwargs)
        # Install the OAuth1 signer as this session's auth handler so every
        # request sent through the session is signed automatically.
        self.auth = self._client
    @property
    def authorized(self):
        """Boolean that indicates whether this session has an OAuth token
        or not. If `self.authorized` is True, you can reasonably expect
        OAuth-protected requests to the resource to succeed. If
        `self.authorized` is False, you need the user to go through the OAuth
        authentication dance before OAuth-protected requests to the resource
        will succeed.
        """
        if self._client.signature_method == SIGNATURE_RSA:
            # RSA only uses resource_owner_key
            return bool(self._client.resource_owner_key)
        else:
            # other methods of authentication use all three pieces
            return (
                bool(self._client.client_secret) and
                bool(self._client.resource_owner_key) and
                bool(self._client.resource_owner_secret)
            )
    def authorization_url(self, url, request_token=None, **kwargs):
        """Create an authorization URL by appending request_token and optional
        kwargs to url.
        This is the second step in the OAuth 1 workflow. The user should be
        redirected to this authorization URL, grant access to you, and then
        be redirected back to you. The redirection back can either be specified
        during client registration or by supplying a callback URI per request.
        :param url: The authorization endpoint URL.
        :param request_token: The previously obtained request token.
        :param kwargs: Optional parameters to append to the URL.
        :returns: The authorization URL with new parameters embedded.
        An example using a registered default callback URI.
        >>> request_token_url = 'https://api.twitter.com/oauth/request_token'
        >>> authorization_url = 'https://api.twitter.com/oauth/authorize'
        >>> oauth_session = OAuth1Session('client-key', client_secret='secret')
        >>> oauth_session.fetch_request_token(request_token_url)
        {
            'oauth_token': 'sdf0o9823sjdfsdf',
            'oauth_token_secret': '2kjshdfp92i34asdasd',
        }
        >>> oauth_session.authorization_url(authorization_url)
        'https://api.twitter.com/oauth/authorize?oauth_token=sdf0o9823sjdfsdf'
        >>> oauth_session.authorization_url(authorization_url, foo='bar')
        'https://api.twitter.com/oauth/authorize?oauth_token=sdf0o9823sjdfsdf&foo=bar'
        An example using an explicit callback URI.
        >>> request_token_url = 'https://api.twitter.com/oauth/request_token'
        >>> authorization_url = 'https://api.twitter.com/oauth/authorize'
        >>> oauth_session = OAuth1Session('client-key', client_secret='secret', callback_uri='https://127.0.0.1/callback')
        >>> oauth_session.fetch_request_token(request_token_url)
        {
            'oauth_token': 'sdf0o9823sjdfsdf',
            'oauth_token_secret': '2kjshdfp92i34asdasd',
        }
        >>> oauth_session.authorization_url(authorization_url)
        'https://api.twitter.com/oauth/authorize?oauth_token=sdf0o9823sjdfsdf&oauth_callback=https%3A%2F%2F127.0.0.1%2Fcallback'
        """
        # Fall back to the token previously stored on the client.
        kwargs['oauth_token'] = request_token or self._client.client.resource_owner_key
        log.debug('Adding parameters %s to url %s', kwargs, url)
        return add_params_to_uri(url, kwargs.items())
    def fetch_request_token(self, url, realm=None):
        """Fetch a request token.
        This is the first step in the OAuth 1 workflow. A request token is
        obtained by making a signed post request to url. The token is then
        parsed from the application/x-www-form-urlencoded response and ready
        to be used to construct an authorization url.
        :param url: The request token endpoint URL.
        :param realm: A list of realms to request access to.
        :returns: The response in dict format.
        Note that a previously set callback_uri will be reset for your
        convenience, or else signature creation will be incorrect on
        consecutive requests.
        >>> request_token_url = 'https://api.twitter.com/oauth/request_token'
        >>> oauth_session = OAuth1Session('client-key', client_secret='secret')
        >>> oauth_session.fetch_request_token(request_token_url)
        {
            'oauth_token': 'sdf0o9823sjdfsdf',
            'oauth_token_secret': '2kjshdfp92i34asdasd',
        }
        """
        self._client.client.realm = ' '.join(realm) if realm else None
        token = self._fetch_token(url)
        log.debug('Resetting callback_uri and realm (not needed in next phase).')
        self._client.client.callback_uri = None
        self._client.client.realm = None
        return token
    def fetch_access_token(self, url, verifier=None):
        """Fetch an access token.
        This is the final step in the OAuth 1 workflow. An access token is
        obtained using all previously obtained credentials, including the
        verifier from the authorization step.
        Note that a previously set verifier will be reset for your
        convenience, or else signature creation will be incorrect on
        consecutive requests.
        >>> access_token_url = 'https://api.twitter.com/oauth/access_token'
        >>> redirect_response = 'https://127.0.0.1/callback?oauth_token=kjerht2309uf&oauth_token_secret=lsdajfh923874&oauth_verifier=w34o8967345'
        >>> oauth_session = OAuth1Session('client-key', client_secret='secret')
        >>> oauth_session.parse_authorization_response(redirect_response)
        {
            'oauth_token': 'kjerht2309u',
            'oauth_token_secret': 'lsdajfh923874',
            'oauth_verifier': 'w34o8967345',
        }
        >>> oauth_session.fetch_access_token(access_token_url)
        {
            'oauth_token': 'sdf0o9823sjdfsdf',
            'oauth_token_secret': '2kjshdfp92i34asdasd',
        }
        """
        if verifier:
            self._client.client.verifier = verifier
        if not getattr(self._client.client, 'verifier', None):
            raise VerifierMissing('No client verifier has been set.')
        token = self._fetch_token(url)
        log.debug('Resetting verifier attribute, should not be used anymore.')
        self._client.client.verifier = None
        return token
    def parse_authorization_response(self, url):
        """Extract parameters from the post authorization redirect response URL.
        :param url: The full URL that resulted from the user being redirected
                    back from the OAuth provider to you, the client.
        :returns: A dict of parameters extracted from the URL.
        >>> redirect_response = 'https://127.0.0.1/callback?oauth_token=kjerht2309uf&oauth_token_secret=lsdajfh923874&oauth_verifier=w34o8967345'
        >>> oauth_session = OAuth1Session('client-key', client_secret='secret')
        >>> oauth_session.parse_authorization_response(redirect_response)
        {
            'oauth_token': 'kjerht2309u',
            'oauth_token_secret': 'lsdajfh923874',
            'oauth_verifier': 'w34o8967345',
        }
        """
        log.debug('Parsing token from query part of url %s', url)
        token = dict(urldecode(urlparse(url).query))
        log.debug('Updating internal client token attribute.')
        self._populate_attributes(token)
        return token
    def _populate_attributes(self, token):
        # Push token credentials from the response onto the underlying
        # oauthlib client so subsequent requests are signed with them.
        if 'oauth_token' in token:
            self._client.client.resource_owner_key = token['oauth_token']
        else:
            raise TokenMissing(
                'Response does not contain a token: {resp}'.format(resp=token),
                token,
            )
        if 'oauth_token_secret' in token:
            self._client.client.resource_owner_secret = (
                token['oauth_token_secret'])
        if 'oauth_verifier' in token:
            self._client.client.verifier = token['oauth_verifier']
    def _fetch_token(self, url):
        # POST to the token endpoint, decode the form-encoded (or JSON)
        # response body, and store the credentials on the client.
        log.debug('Fetching token from %s using client %s', url, self._client.client)
        r = self.post(url)
        # Any 4xx/5xx means the provider denied the token request.
        if r.status_code >= 400:
            error = "Token request failed with code %s, response was '%s'."
            raise TokenRequestDenied(error % (r.status_code, r.text), r.status_code)
        log.debug('Decoding token from response "%s"', r.text)
        try:
            token = dict(urldecode(r.text))
        except ValueError as e:
            error = ("Unable to decode token from token response. "
                     "This is commonly caused by an unsuccessful request where"
                     " a non urlencoded error message is returned. "
                     "The decoding error was %s""" % e)
            raise ValueError(error)
        log.debug('Obtained token %s', token)
        log.debug('Updating internal client attributes from token data.')
        self._populate_attributes(token)
        return token
    def rebuild_auth(self, prepared_request, response):
        """
        When being redirected we should always strip Authorization
        header, since nonce may not be reused as per OAuth spec.
        """
        if 'Authorization' in prepared_request.headers:
            # If we get redirected to a new host, we should strip out
            # any authentication headers.
            prepared_request.headers.pop('Authorization', True)
            prepared_request.prepare_auth(self.auth)
        return
|
{
"content_hash": "44ac0332b8498bfb2fd06453e59bb48f",
"timestamp": "",
"source": "github",
"line_count": 371,
"max_line_length": 145,
"avg_line_length": 43.40431266846361,
"alnum_prop": 0.6235484071291064,
"repo_name": "ericoporto/Chove-Agora",
"id": "3097a2e3f51f3784898414da6cbf570fe02c50c4",
"size": "16103",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "lib/requests_oauthlib/oauth1_session.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "3071"
},
{
"name": "Makefile",
"bytes": "3419"
},
{
"name": "Python",
"bytes": "1450035"
},
{
"name": "Shell",
"bytes": "1959"
}
],
"symlink_target": ""
}
|
# Sphinx documentation build configuration for the WilliamWallace project.
# Mostly sphinx-quickstart boilerplate; commented-out settings keep their
# Sphinx defaults.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
# NOTE(review): sphinx.ext.pngmath is deprecated in newer Sphinx versions
# (replaced by sphinx.ext.imgmath); confirm the Sphinx version in use.
extensions = ['sphinx.ext.todo', 'sphinx.ext.pngmath', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'WilliamWallace'
copyright = u'2013, Gillian'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_TemplateModuleNames = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
# NOTE(review): still 'TemplateClassdoc' -- likely a leftover from the plugin
# template this project was generated from; confirm intended basename.
htmlhelp_basename = 'TemplateClassdoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
  ('index', 'WilliamWallace.tex', u'WilliamWallace Documentation',
   u'Gillian', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    ('index', 'TemplateClass', u'WilliamWallace Documentation',
     [u'Gillian'], 1)
]
|
{
"content_hash": "03590499df48ae92c57dc7fe6a35bbb8",
"timestamp": "",
"source": "github",
"line_count": 203,
"max_line_length": 80,
"avg_line_length": 32.714285714285715,
"alnum_prop": 0.7075741605179943,
"repo_name": "gilliM/wallace",
"id": "21103b183c945ec2c546537cc52568695a778d76",
"size": "7066",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "WilliamWallace/help/source/conf.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "4126"
},
{
"name": "Makefile",
"bytes": "11909"
},
{
"name": "Python",
"bytes": "97468"
},
{
"name": "QML",
"bytes": "1564"
},
{
"name": "Shell",
"bytes": "2447"
}
],
"symlink_target": ""
}
|
import unittest
from ..a import add
from ..b import multiply
# Combined smoke test for the add and multiply helpers.
class TestAB(unittest.TestCase):
    def test_add_and_multiply(self):
        """test adding and multiplying at the same time"""
        total = add(2, 3)
        product = multiply(2, 3)
        self.assertEqual(total, 5)
        self.assertEqual(product, 6)
|
{
"content_hash": "b7f3d2364de7505d6bc7533468314beb",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 58,
"avg_line_length": 25.818181818181817,
"alnum_prop": 0.6619718309859155,
"repo_name": "public/testmon",
"id": "79c3e0c52969a3de8c04313ecbe31ee3fe9062ad",
"size": "284",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "exampleproject/tests/test_ab.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "80323"
},
{
"name": "Shell",
"bytes": "17"
}
],
"symlink_target": ""
}
|
from django.db import models
from django.contrib.contenttypes.fields import GenericRelation
from mezzanine.pages.page_processors import processor_for
from hs_core.models import BaseResource, ResourceManager, resource_processor,\
CoreMetaData, AbstractMetaDataElement
from lxml import etree
# Create your models here.
class OriginalFileInfo(AbstractMetaDataElement):
    """Metadata element describing the originally uploaded file set."""
    term = 'OriginalFileInfo'
    # (stored value, human-readable label) pairs for the source file format.
    fileTypeEnum = (
        (None, 'Unknown'),
        ("SHP", "ESRI Shapefiles"),
        ("ZSHP", "Zipped ESRI Shapefiles"),
        ("KML", "KML"),
        ("KMZ", "KMZ"),
        ("GML", "GML"),
        ("SQLITE", "SQLite")
    )
    # Format code of the uploaded data; restricted to fileTypeEnum choices.
    fileType = models.TextField(max_length=128, choices=fileTypeEnum, default=None)
    # Base file name shared by the uploaded file set (required).
    baseFilename = models.TextField(max_length=256, null=False, blank=False)
    # Number of files in the original upload -- presumably includes sidecar
    # files such as .shx/.dbf; TODO confirm against the upload handler.
    fileCount = models.IntegerField(null=False, blank=False, default=0)
    # Optional serialized listing of the original file names.
    filenameString = models.TextField(null=True, blank=True)
    class Meta:
        # OriginalFileInfo element is not repeatable
        unique_together = ("content_type", "object_id")
# Define original spatial coverage metadata info
class OriginalCoverage(AbstractMetaDataElement):
    """Bounding box of the feature set in its original projection."""
    term = 'OriginalCoverage'
    # Bounding-box limits (required).
    northlimit = models.FloatField(null=False, blank=False)
    southlimit = models.FloatField(null=False, blank=False)
    westlimit = models.FloatField(null=False, blank=False)
    eastlimit = models.FloatField(null=False, blank=False)
    # Full projection definition string -- format (WKT/proj4) not evident
    # from this model; TODO confirm at the point where it is populated.
    projection_string = models.TextField(null=True, blank=True)
    projection_name = models.TextField(max_length=256, null=True, blank=True)
    datum = models.TextField(max_length=256, null=True, blank=True)
    unit = models.TextField(max_length=256, null=True, blank=True)
    class Meta:
        # OriginalCoverage element is not repeatable
        unique_together = ("content_type", "object_id")
class FieldInformation(AbstractMetaDataElement):
    """Attribute-table field description.

    Unlike the sibling elements, no unique_together Meta is declared, so
    this element is repeatable (one instance per field).
    """
    term = 'FieldInformation'
    fieldName = models.CharField(max_length=128, null=False, blank=False)
    fieldType = models.CharField(max_length=128, null=False, blank=False)
    # Optional type code / width / precision as reported by the source data.
    fieldTypeCode = models.CharField(max_length=50, null=True, blank=True)
    fieldWidth = models.IntegerField(null=True, blank=True)
    fieldPrecision = models.IntegerField(null=True, blank=True)
class GeometryInformation(AbstractMetaDataElement):
    """Geometry type and feature count of the feature set."""
    term = 'GeometryInformation'
    # Total number of features in the dataset (0 when unknown).
    featureCount = models.IntegerField(null=False, blank=False, default=0)
    # Geometry type name; exact vocabulary not evident here -- TODO confirm
    # against the code that populates this element.
    geometryType = models.CharField(max_length=128, null=False, blank=False)
    class Meta:
        # GeometryInformation element is not repeatable
        unique_together = ("content_type", "object_id")
# Define the Geographic Feature
class GeographicFeatureResource(BaseResource):
    """Proxy resource type for ESRI shapefile based geographic features."""
    objects = ResourceManager("GeographicFeatureResource")
    @property
    def metadata(self):
        """Return this resource's GeographicFeatureMetaData instance."""
        md = GeographicFeatureMetaData()
        return self._get_metadata(md)
    @classmethod
    def get_supported_upload_file_types(cls):
        """Return the file extensions accepted for upload."""
        # See Shapefile format:
        # http://resources.arcgis.com/en/help/main/10.2/index.html#//005600000003000000
        return (".zip", ".shp", ".shx", ".dbf", ".prj",
                ".sbx", ".sbn", ".cpg", ".xml", ".fbn",
                ".fbx", ".ain", ".aih", ".atx", ".ixs",
                ".mxs")
    # add resource-specific HS terms
    def get_hs_term_dict(self):
        """Extend the base term dict with the feature count (0 if unknown)."""
        # get existing hs_term_dict from base class
        hs_term_dict = super(GeographicFeatureResource, self).get_hs_term_dict()
        geometryinformation = self.metadata.geometryinformation.all().first()
        if geometryinformation is not None:
            hs_term_dict["HS_GFR_FEATURE_COUNT"] = geometryinformation.featureCount
        else:
            hs_term_dict["HS_GFR_FEATURE_COUNT"] = 0
        return hs_term_dict
    class Meta:
        verbose_name = 'Geographic Feature (ESRI Shapefiles)'
        # Proxy model: no new DB table; behavior added on top of BaseResource.
        proxy = True
processor_for(GeographicFeatureResource)(resource_processor)
# define the GeographicFeatureMetaData metadata
class GeographicFeatureMetaData(CoreMetaData):
    """Metadata container for GeographicFeatureResource.

    Extends CoreMetaData with the shapefile-specific elements defined
    above: geometry information, attribute field descriptions, the
    original spatial reference and original-file information.
    """
    geometryinformation = GenericRelation(GeometryInformation)
    fieldinformation = GenericRelation(FieldInformation)
    originalcoverage = GenericRelation(OriginalCoverage)
    originalfileinfo = GenericRelation(OriginalFileInfo)

    @property
    def resource(self):
        # Reverse lookup: the resource this metadata object belongs to.
        return GeographicFeatureResource.objects.filter(object_id=self.id).first()

    @classmethod
    def get_supported_element_names(cls):
        """Return the names of all metadata elements this class supports."""
        # get the names of all core metadata elements
        elements = super(GeographicFeatureMetaData, cls).get_supported_element_names()
        # add the name of any additional element to the list
        elements.append('FieldInformation')
        elements.append('OriginalCoverage')
        elements.append('GeometryInformation')
        elements.append('OriginalFileInfo')
        return elements

    def has_all_required_elements(self):
        """True when every required metadata element is populated."""
        if self.get_required_missing_elements():
            return False
        return True

    def get_required_missing_elements(self):  # show missing required meta
        """Return display names of required elements that are still missing."""
        missing_required_elements = super(GeographicFeatureMetaData, self).\
            get_required_missing_elements()
        # Either a box or a point spatial coverage satisfies the requirement.
        if not (self.coverages.all().filter(type='box').first() or
                self.coverages.all().filter(type='point').first()):
            missing_required_elements.append('Spatial Coverage')
        if not self.originalcoverage.all().first():
            missing_required_elements.append('Spatial Reference')
        if not self.geometryinformation.all().first():
            missing_required_elements.append('Geometry Information')
        if not self.originalfileinfo.all().first():
            missing_required_elements.append('Resource File Information')
        return missing_required_elements

    def get_xml(self, pretty_print=True):
        """Serialize core plus geographic-feature metadata to RDF/XML."""
        # get the xml string representation of the core metadata elements
        xml_string = super(GeographicFeatureMetaData, self).get_xml(pretty_print=False)
        # create an etree xml object
        RDF_ROOT = etree.fromstring(xml_string)
        # get root 'Description' element that contains all other elements
        container = RDF_ROOT.find('rdf:Description', namespaces=self.NAMESPACES)
        if self.originalfileinfo.all().first():
            originalfileinfo_fields = ['fileType', 'fileCount', 'baseFilename', 'filenameString']
            self.add_metadata_element_to_xml(container,
                                             self.originalfileinfo.all().first(),
                                             originalfileinfo_fields)
        if self.geometryinformation.all().first():
            geometryinformation_fields = ['geometryType', 'featureCount']
            self.add_metadata_element_to_xml(container,
                                             self.geometryinformation.all().first(),
                                             geometryinformation_fields)
        # FieldInformation is repeatable: emit one element per field.
        for field_info in self.fieldinformation.all():
            field_info_fields = ['fieldName', 'fieldType',
                                 'fieldTypeCode', 'fieldWidth', 'fieldPrecision']
            self.add_metadata_element_to_xml(container, field_info, field_info_fields)
        if self.originalcoverage.all().first():
            ori_coverage = self.originalcoverage.all().first()
            # Spatial reference is emitted by hand as a hsterms:box value
            # rather than via add_metadata_element_to_xml.
            cov = etree.SubElement(container, '{%s}spatialReference' % self.NAMESPACES['hsterms'])
            cov_term = '{%s}' + 'box'
            coverage_terms = etree.SubElement(cov, cov_term % self.NAMESPACES['hsterms'])
            rdf_coverage_value = etree.SubElement(coverage_terms,
                                                  '{%s}value' % self.NAMESPACES['rdf'])
            # original coverage is of box type
            cov_value = 'northlimit=%s; eastlimit=%s; southlimit=%s; westlimit=%s; units=%s' \
                        % (ori_coverage.northlimit, ori_coverage.eastlimit,
                           ori_coverage.southlimit, ori_coverage.westlimit, ori_coverage.unit)
            # NOTE(review): optional fields may be None here, which renders
            # as the literal string 'None' in the XML value -- confirm intent.
            cov_value = cov_value + '; projection_name=%s' % \
                        ori_coverage.projection_name + '; datum=%s' % \
                        ori_coverage.datum + '; projection_string=%s' % \
                        ori_coverage.projection_string
            rdf_coverage_value.text = cov_value
        return etree.tostring(RDF_ROOT, pretty_print=pretty_print)

    def delete_all_elements(self):
        """Delete core elements plus all geographic-feature specific ones."""
        super(GeographicFeatureMetaData, self).delete_all_elements()
        self.geometryinformation.all().delete()
        self.fieldinformation.all().delete()
        self.originalcoverage.all().delete()
        self.originalfileinfo.all().delete()
|
{
"content_hash": "e94ae88c8f2abb0b9078c7899309402f",
"timestamp": "",
"source": "github",
"line_count": 210,
"max_line_length": 98,
"avg_line_length": 42.17619047619048,
"alnum_prop": 0.6410748560460653,
"repo_name": "RENCI/xDCIShare",
"id": "a2adf8c5410132b2b3046c32e14a188908e754d1",
"size": "8857",
"binary": false,
"copies": "2",
"ref": "refs/heads/xdci-develop",
"path": "hs_geographic_feature_resource/models.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "381782"
},
{
"name": "HTML",
"bytes": "964877"
},
{
"name": "JavaScript",
"bytes": "2011819"
},
{
"name": "Python",
"bytes": "4334769"
},
{
"name": "R",
"bytes": "4472"
},
{
"name": "Shell",
"bytes": "52665"
},
{
"name": "XSLT",
"bytes": "790987"
}
],
"symlink_target": ""
}
|
"""
Context managers for use with the ``with`` statement.
.. note:: When using Python 2.5, you will need to start your fabfile
with ``from __future__ import with_statement`` in order to make use of
the ``with`` statement (which is a regular, non ``__future__`` feature of
Python 2.6+.)
"""
from contextlib import contextmanager, nested
import sys
from fabric.state import env, output, win32
if not win32:
import termios
import tty
def _set_output(groups, which):
    """
    Refactored subroutine used by ``hide`` and ``show``.

    Generator that sets every output group named in ``groups`` to ``which``
    (True/False), yields control to the wrapped block, and then restores
    the previous values.
    """
    # Preserve original values, pull in new given value to use
    previous = {}
    for group in output.expand_aliases(groups):
        previous[group] = output[group]
        output[group] = which
    try:
        # Yield control to the wrapped block
        yield
    finally:
        # Restore original values. The try/finally guarantees restoration
        # even when the wrapped block raises; previously an exception
        # propagated through the yield and left the output levels altered
        # for the remainder of the session.
        output.update(previous)
@contextmanager
def show(*groups):
    """
    Context manager that switches the named output ``groups`` on.

    Each name in ``groups`` must be an output group defined in
    `~fabric.state.output`. The listed groups are set to True while the
    block runs and restored to their previous values afterwards::

        def my_task():
            with show('debug'):
                run('ls /var/www')

    Because almost all groups are displayed by default, `show` is mostly
    useful for the normally-hidden ``debug`` group, or to counteract an
    enclosing `hide` applied by calling code.
    """
    manager = _set_output(groups, True)
    return manager
@contextmanager
def hide(*groups):
    """
    Context manager that switches the named output ``groups`` off.

    Each name in ``groups`` must be an output group defined in
    `~fabric.state.output`. The listed groups are set to False while the
    block runs and restored to their previous values afterwards.

    For example, to suppress the "[hostname] run:" status lines as well as
    stdout and stderr printout::

        def my_task():
            with hide('running', 'stdout', 'stderr'):
                run('ls /var/www')
    """
    manager = _set_output(groups, False)
    return manager
@contextmanager
def _setenv(**kwargs):
    """
    Context manager temporarily overriding ``env`` with given key/value pairs.

    Used internally by `settings`; not intended for direct use. Keys that
    already exist in ``env`` are restored to their old values on exit, and
    keys introduced here are deleted again.
    """
    saved = {}
    added = []
    for key, value in kwargs.items():
        if key in env:
            saved[key] = env[key]
        else:
            added.append(key)
        env[key] = value
    try:
        yield
    finally:
        # Undo: restore pre-existing keys, drop the ones we introduced.
        env.update(saved)
        for key in added:
            del env[key]
def settings(*args, **kwargs):
    """
    Nest context managers and/or temporarily override ``env`` variables.

    Keyword arguments are applied to ``env`` for the duration of the
    ``with`` block (original values, if any, are restored on exit), e.g.
    ``with settings(user='foo'):``. Positional arguments must themselves be
    context managers and are combined via `contextlib.nested`_, e.g.
    ``with settings(hide('stderr'), show('stdout')):``.

    .. _contextlib.nested: http://docs.python.org/library/contextlib.html#contextlib.nested

    Both behaviors may be combined::

        def my_task():
            with settings(
                hide('warnings', 'running', 'stdout', 'stderr'),
                warn_only=True
            ):
                if run('ls /etc/lsb-release'):
                    return 'Ubuntu'
                elif run('ls /etc/redhat-release'):
                    return 'RedHat'

    The above probes the remote system type completely silently: the failed
    ``ls`` merely warns (and even the warning is hidden) instead of
    aborting. `settings` thus combines env overrides with output control or
    any other context-manager-based Fabric functionality.
    """
    combined = list(args)
    if kwargs:
        combined.append(_setenv(**kwargs))
    return nested(*combined)
def cd(path):
    """
    Context manager that keeps directory state for remote operations.

    Within the block, every call to `run`, `sudo`, `get` or `put` is
    implicitly prefixed with ``"cd <path> && "``, simulating statefulness:
    shell-less SSH keeps no state between invocations, so a plain
    ``run('cd /var/www'); run('ls')`` lists ``$HOME``, not ``/var/www``.
    With `cd` it works as expected::

        with cd('/var/www'):
            run('ls')                 # cd /var/www && ls

    Relative paths are resolved against the remote user's ``$HOME``, and
    nested blocks compose like the shell builtin::

        with cd('/var/www'):
            run('ls')                 # cd /var/www && ls
            with cd('website1'):
                run('ls')             # cd /var/www/website1 && ls

    .. note::
        `cd` only affects *remote* paths -- to modify *local* paths, use
        `~fabric.context_managers.lcd`.

    .. note::
        Implemented by appending to (and restoring) ``env.cwd``; rely on
        the *behavior* of `cd` rather than altering ``env.cwd`` manually,
        as the implementation may change.

    .. note::
        Space characters are escaped automatically.

    .. versionchanged:: 1.0
        Applies to `get` and `put` in addition to the command-running
        operations.

    .. seealso:: `~fabric.context_managers.lcd`
    """
    return _change_cwd('cwd', path)
def lcd(path):
    """
    Context manager for updating the *local* current working directory.

    Identical to `~fabric.context_managers.cd` except that it updates
    ``env.lcwd`` instead of ``env.cwd``, so it only affects
    `~fabric.operations.local` and the local arguments to
    `~fabric.operations.get`/`~fabric.operations.put`.

    .. versionadded:: 1.0
    """
    return _change_cwd('lcwd', path)
def _change_cwd(which, path):
    """Shared implementation of `cd`/`lcd`: update env key ``which``."""
    # Escape spaces so the resulting "cd <path> &&" prefix survives the shell.
    escaped = path.replace(' ', '\\ ')
    current = env.get(which)
    # Relative paths extend the currently-tracked directory, if any.
    if current and not escaped.startswith('/'):
        escaped = current + '/' + escaped
    return _setenv(**{which: escaped})
def path(path, behavior='append'):
    """
    Prepend a ``PATH=...`` assignment to wrapped `run`/`sudo` commands.

    Within the block, commands run with a prefix similar to
    ``"PATH=$PATH:<path> "``. The optional ``behavior`` keyword selects how
    the given path combines with the remote ``$PATH``:

    * ``'append'`` (default): ``PATH=$PATH:<path>``
    * ``'prepend'``: ``PATH=<path>:$PATH``
    * ``'replace'``: ``PATH=<path>``

    .. note::
        Implemented by setting (and restoring) ``env.path`` and
        ``env.path_behavior``; treat those as implementation details rather
        than altering them directly, as they may change in the future.

    .. versionadded:: 1.0
    """
    return _setenv(path=path, path_behavior=behavior)
def prefix(command):
    """
    Prefix all wrapped `run`/`sudo` commands with ``<command> && ``.

    Nearly identical to `~fabric.operations.cd`, except nested invocations
    accumulate a *list* of command strings instead of modifying a single
    path. Most useful with shell-state-altering commands, e.g.
    virtualenvwrapper's ``workon``::

        with prefix('workon myvenv'):
            run('./manage.py syncdb')
        # actual shell command: workon myvenv && ./manage.py syncdb

    Composes with `~fabric.context_managers.cd`::

        with cd('/path/to/app'):
            with prefix('workon myvenv'):
                run('./manage.py syncdb')
                run('./manage.py loaddata myfixture')
        # cd /path/to/app && workon myvenv && ./manage.py syncdb
        # cd /path/to/app && workon myvenv && ./manage.py loaddata myfixture

    And with itself::

        with prefix('workon myenv'):
            run('ls')                       # workon myenv && ls
            with prefix('source /some/script'):
                run('touch a_file')
                # workon myenv && source /some/script && touch a_file
    """
    return _setenv(command_prefixes=env.command_prefixes + [command])
@contextmanager
def char_buffered(pipe):
    """
    Force local terminal ``pipe`` to be character, not line, buffered.

    Only applies on Unix-based systems when ``pipe`` is a tty; on Windows
    (or for non-tty pipes) this is a no-op. Terminal settings are restored
    on exit even if the wrapped block raises.
    """
    if not win32 and pipe.isatty():
        saved = termios.tcgetattr(pipe)
        tty.setcbreak(pipe)
        try:
            yield
        finally:
            termios.tcsetattr(pipe, termios.TCSADRAIN, saved)
    else:
        yield
|
{
"content_hash": "841e36345113b232ee3f516fbb884654",
"timestamp": "",
"source": "github",
"line_count": 336,
"max_line_length": 91,
"avg_line_length": 33.69642857142857,
"alnum_prop": 0.6399929341105812,
"repo_name": "fitoria/fabric",
"id": "2bf81d7831593fddb2389a6a58a07b3ed1833b61",
"size": "11322",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "fabric/context_managers.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [],
"symlink_target": ""
}
|
import cherrypy
from server.Concept import Concept, Concepts
from server.Description import Description, Descriptions, DescriptionsForConcept, ConceptForDescription, \
PreferredDescriptionForConcept, FSNForConcept, ConceptBase
from server.Relationship import Relationship, Relationships, RelationshipsForSource, RelationshipsForTarget, \
RelationshipsForPredicate
from server.Language import LanguagesForConcept, LanguagesForDescription, Language
from server.SimpleRefset import SimpleRefsetByComponent, SimpleRefSet
from server.SimpleMap import SimpleMapByMapId, SimpleMapForSource, SimpleMapForTarget
from server.ComplexMap import ComplexMapById, ComplexMapForSource, ComplexMapForTarget
from server.Changeset import Changeset
from server.OtherRefsets import ModuleDependency
from server.GenericRefset import GenericRefset
from server.Server import ServerConf
from server.Root import Root
from auth.ihtsdoauth import License
# NOTE: RoutesDispatcher requires the third-party 'routes' package
# (install with pip). One dispatcher is shared by every Resource below.
dispatcher = cherrypy.dispatch.RoutesDispatcher()
class Resource:
    """Binds a URL path + HTTP method to a controller action.

    Instantiating a Resource registers the route on the module-level
    ``dispatcher``. Controller classes are instantiated at most once and
    cached in the class-level ``controllers`` dict, so every route for the
    same controller class shares a single controller instance.
    """
    # Shared cache: controller class -> singleton controller instance.
    controllers = {}

    def __init__(self, path, controller, method='GET', action='default'):
        """ Construct a path. Paths are evaluated sequentially, so the most specific must occur first. Also note that
        both this function *and* CherryPy map the path to a controller and then map the HTTP methods to functions,
        meaning that you can't send a GET to one class and a PUT to another

        :param path: relative path to match (Routes format)
        :param controller: Class to instantiate
        :param method: HTTP method to match
        :param action: action to invoke based on method
        """
        # BUG FIX: the previous ``setdefault(controller, controller())``
        # evaluated controller() on *every* call, constructing (and
        # discarding) a fresh controller instance even when one was already
        # cached. Guard explicitly so each class is instantiated once.
        if controller not in self.controllers:
            self.controllers[controller] = controller()
        dispatcher.connect(path, path, controller=self.controllers[controller], action=action,
                           conditions=dict(method=[method]))
print("Connecting resources")
# Route table. Routes are evaluated sequentially, so the most specific
# paths must appear before more general ones (see Resource docstring).
resources = [Resource(r'/', Root, action='index'),
             Resource(r'/config', ServerConf),
             Resource(r'/status', ServerConf, action='status'),
             # --- license handling ---
             Resource(r'/license', License, action='index'),
             Resource(r'/submit', License, action='submit', method='POST'),
             # --- generic refsets ---
             Resource(r'/refset', GenericRefset, action='index'),
             Resource(r'/refset/:uuid', GenericRefset),
             # --- concepts ---
             Resource(r'/concepts', Concepts, action='index'),
             Resource(r'/concepts/:after', Concepts),
             Resource(r'/concepts/', Concepts),
             Resource(r'/concept/languages', LanguagesForConcept, action='index'),
             Resource(r'/concept/prefdescription', PreferredDescriptionForConcept, action='index'),
             Resource(r'/concept/descriptions', DescriptionsForConcept, action='index'),
             Resource(r'/concept/:concept/descriptions/:matchvalue', DescriptionsForConcept),
             Resource(r'/concept/:concept/descriptions/', DescriptionsForConcept, action='index'),
             Resource(r'/concept/:concept/descriptions', DescriptionsForConcept),
             Resource(r'/concept/:concept/languages', LanguagesForConcept),
             Resource(r'/concept/:concept/prefdescription', PreferredDescriptionForConcept),
             Resource(r'/concept/:concept/fsn', FSNForConcept),
             Resource(r'/concept/:concept/base', ConceptBase),
             Resource(r'/concept/:concept', Concept, action='update', method='PUT'),
             Resource(r'/concept/:concept', Concept, action='delete', method='DELETE'),
             Resource(r'/concept/:concept', Concept),
             Resource(r'/concept', Concept, action='new', method='POST'),
             Resource(r'/concept', Concept, action='index'),
             # --- descriptions ---
             Resource(r'/descriptions/:matchvalue', Descriptions),
             Resource(r'/descriptions/', Descriptions),
             Resource(r'/descriptions', Descriptions, action='index'),
             Resource(r'/description/concept', ConceptForDescription, action='index'),
             Resource(r'/description/languages', LanguagesForDescription, action='index'),
             Resource(r'/description/:desc/concept/', ConceptForDescription),
             Resource(r'/description/:desc/concept', ConceptForDescription),
             Resource(r'/description/:desc/languages', LanguagesForDescription),
             Resource(r'/description/:desc', Description, action='update', method='PUT'),
             Resource(r'/description/:desc', Description, action='delete', method='DELETE'),
             Resource(r'/description/:desc', Description),
             Resource(r'/description', Description, action='new', method='POST'),
             Resource(r'/description', Description, action='index'),
             # --- relationships ---
             Resource(r'/relationship/:rel', Relationship),
             Resource(r'/relationship', Relationship, action='index'),
             Resource(r'/relationships/source/:source', RelationshipsForSource),
             Resource(r'/relationships/source', RelationshipsForSource, action='index'),
             Resource(r'/relationships/predicate/:predicate', RelationshipsForPredicate),
             Resource(r'/relationships/predicate', RelationshipsForPredicate, action='index'),
             Resource(r'/relationships/target/:target', RelationshipsForTarget),
             Resource(r'/relationships/target', RelationshipsForTarget, action='index'),
             Resource(r'/relationships/:value', Relationships),
             Resource(r'/relationship/:rid', Relationship, action='delete', method='DELETE'),
             Resource(r'/relationship/source/:source/target/:target', Relationship, action='new', method='POST'),
             Resource(r'/relationship/source/:source/predicate/:predicate/target/:target', Relationship,
                      action='new', method='POST'),
             Resource(r'/relationships/', Relationships),
             Resource(r'/relationships', Relationships, action='index'),
             # --- languages and module dependencies ---
             Resource(r'/language/:uuid', Language),
             Resource(r'/language', Language, action='index'),
             Resource(r'/moduledependency/:uuid', ModuleDependency),
             Resource(r'/moduledependency', ModuleDependency, action='index'),
             # --- simple refsets ---
             # Note: there can't be a refset called "component" -- so be it
             Resource(r'/simplerefset/component/:component', SimpleRefsetByComponent),
             Resource(r'/simplerefset/component', SimpleRefsetByComponent, action='index'),
             Resource(r'/simplerefset/:refset/component/:component', SimpleRefSet, action='update', method='PUT'),
             Resource(r'/simplerefset/:refset/component/:component', SimpleRefSet, action='delete', method='DELETE'),
             Resource(r'/simplerefset/:refset/component/:component', SimpleRefSet),
             # This allows for multiple components (component=a b c...)
             Resource(r'/simplerefset/:refset', SimpleRefSet, action='update', method='PUT'),
             Resource(r'/simplerefset/:refset', SimpleRefSet, action='delete', method='DELETE'),
             Resource(r'/simplerefset/:refset', SimpleRefSet),
             Resource(r'/simplerefset/', SimpleRefSet, action='new', method='POST'),
             Resource(r'/simplerefset/', SimpleRefSet),
             Resource(r'/simplerefset', SimpleRefSet, action='new', method='POST'),
             Resource(r'/simplerefset', SimpleRefSet, action='index'),
             # --- simple maps ---
             Resource(r'/simplemap/source/:component', SimpleMapForSource),
             Resource(r'/simplemap/:refset/source/:component', SimpleMapForSource),
             Resource(r'/simplemap/source', SimpleMapForSource, action='index'),
             Resource(r'/simplemap/target/:target', SimpleMapForTarget),
             Resource(r'/simplemap/:refset/target/:target', SimpleMapForTarget),
             Resource(r'/simplemap/target', SimpleMapForTarget, action='index'),
             Resource(r'/simplemap/:refset', SimpleMapByMapId),
             Resource(r'/simplemap', SimpleMapByMapId, action='index'),
             Resource(r'/simplemap/', SimpleMapByMapId),
             # --- complex maps ---
             Resource(r'/complexmap/source/:component', ComplexMapForSource),
             Resource(r'/complexmap/:refset/source/:component', ComplexMapForSource),
             Resource(r'/complexmap/source', ComplexMapForSource, action='index'),
             Resource(r'/complexmap/target/:target', ComplexMapForTarget),
             Resource(r'/complexmap/:refset/target/:target', ComplexMapForTarget),
             Resource(r'/complexmap/target', ComplexMapForTarget, action='index'),
             Resource(r'/complexmap/:refset', ComplexMapById),
             Resource(r'/complexmap', ComplexMapById, action='index'),
             Resource(r'/complexmap/', ComplexMapById),
             # --- changesets ---
             Resource(r'/changeset/:changeset/commit', Changeset, action='commit', method='PUT'),
             Resource(r'/changeset/:changeset', Changeset, action='update', method='PUT'),
             Resource(r'/changeset/:changeset', Changeset, action='delete', method='DELETE'),
             Resource(r'/changeset/:changeset/details', Changeset, action='details'),
             Resource(r'/changeset/:changeset/uuid', Changeset, action='uuid'),
             Resource(r'/changeset/:changeset', Changeset),
             Resource(r'/changeset/:csname', Changeset, action='new', method='POST'),
             Resource(r'/changeset', Changeset, action='index'),
             Resource(r'/changeset/', Changeset),
             Resource(r'/changeset', Changeset, action='new', method='POST'),
             Resource(r'/changesets', Changeset, action='list'), ]
|
{
"content_hash": "94eb74840e5756b1c26dc12020902ba0",
"timestamp": "",
"source": "github",
"line_count": 150,
"max_line_length": 118,
"avg_line_length": 64.22,
"alnum_prop": 0.6662514273850306,
"repo_name": "cts2/rf2service",
"id": "8e8049510689d337fb049a92c9154231ef981853",
"size": "11211",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "server/RESTDispatcher.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "23785"
},
{
"name": "JavaScript",
"bytes": "3025"
},
{
"name": "Python",
"bytes": "187489"
}
],
"symlink_target": ""
}
|
from otp.level import LevelMgr
import FactoryUtil
from toontown.toonbase.ToonPythonUtil import Functor
from toontown.toonbase import ToontownGlobals
class FactoryLevelMgr(LevelMgr.LevelMgr):
    """Factory-specific level manager: parents the level geometry into the
    scene graph, applies camera far-plane settings, fixes oil-room draw
    order and (optionally) creates a developer toon lifter.
    """
    # ((x, y, z), heading) tuples of notable positions in the factory.
    # NOTE(review): the split into two groups is not explained here --
    # presumably one list per factory layout; confirm against callers.
    InterestingLocations = [(((-866, -272, -40), -101),
                             ((-662, -242, 7.5), 0),
                             ((-20, -180, 20), 0),
                             ((-249, 258, 111), 0),
                             ((318, 241, 115), -16),
                             ((-251, 241, 109), -180),
                             ((296, 292, 703), 56),
                             ((-740, 122, 28), 90),
                             ((210, -270, 38), -90)), (((20, 21, 0), 0), ((3, 404, 39), -16), ((-496, 358, 5), 0))]

    def __init__(self, level, entId):
        LevelMgr.LevelMgr.__init__(self, level, entId)
        # Optional dev aid, enabled through the 'want-factory-lifter'
        # config variable (defaults off).
        if config.GetBool('want-factory-lifter', 0):
            self.toonLifter = FactoryUtil.ToonLifter('f3')
        # Pushes the configured value through setFarPlaneDistance() below.
        self.callSetters('farPlaneDistance')
        self.geom.reparentTo(render)
        # Oil-room draw-order fix: within the 'background' bin the oil
        # (sort 10) is drawn before the floor platform (sort 11).
        oilRoomOil = self.geom.find('**/oilroom/room/geometry_oilroom/*oil')
        oilRoomFloor = self.geom.find('**/oilroom/room/geometry_oilroom/*platform')
        if oilRoomOil and not oilRoomOil.isEmpty() and oilRoomFloor and not oilRoomFloor.isEmpty():
            oilRoomOil.setBin('background', 10)
            oilRoomFloor.setBin('background', 11)

    def destroy(self):
        # Tear down the optional lifter before delegating to the base class.
        if hasattr(self, 'toonLifter'):
            self.toonLifter.destroy()
            del self.toonLifter
        LevelMgr.LevelMgr.destroy(self)

    def setFarPlaneDistance(self, farPlaneDistance):
        # Invoked via callSetters('farPlaneDistance') in __init__.
        base.camLens.setNearFar(ToontownGlobals.DefaultCameraNear, farPlaneDistance)

    if __dev__:
        # Level-editor hook; only defined when running in dev mode.
        def setWantDoors(self, wantDoors):
            self.wantDoors = wantDoors
            messenger.send('wantDoorsChanged')
|
{
"content_hash": "c8c58d557f30f4d5628b5146332af0bc",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 99,
"avg_line_length": 39.404761904761905,
"alnum_prop": 0.6163141993957704,
"repo_name": "silly-wacky-3-town-toon/SOURCE-COD",
"id": "566ce5bc741d0c2bb61b6894ce560b8304bdd089",
"size": "1655",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "toontown/coghq/FactoryLevelMgr.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "10249"
},
{
"name": "C",
"bytes": "1752256"
},
{
"name": "C#",
"bytes": "8440"
},
{
"name": "C++",
"bytes": "5485400"
},
{
"name": "Emacs Lisp",
"bytes": "210083"
},
{
"name": "F#",
"bytes": "2310"
},
{
"name": "Forth",
"bytes": "506"
},
{
"name": "GLSL",
"bytes": "1040"
},
{
"name": "JavaScript",
"bytes": "7003"
},
{
"name": "Makefile",
"bytes": "895"
},
{
"name": "Mask",
"bytes": "969"
},
{
"name": "NSIS",
"bytes": "1009050"
},
{
"name": "Objective-C",
"bytes": "21821"
},
{
"name": "PLSQL",
"bytes": "10200"
},
{
"name": "Pascal",
"bytes": "4986"
},
{
"name": "Perl6",
"bytes": "30612"
},
{
"name": "Puppet",
"bytes": "259"
},
{
"name": "Python",
"bytes": "33566014"
},
{
"name": "Shell",
"bytes": "14642"
},
{
"name": "Tcl",
"bytes": "2084458"
}
],
"symlink_target": ""
}
|
from buildHTMLSection import Document
from buildHTMLSection import Requirements
from buildHTMLSection import Objective
from buildHTMLSection import findElementById
import sys
import os
class TraceTable(Document):
    """Render a two-column HTML trace table and write it to ``target``.

    ``information`` maps a key (requirement or test id) to a string of
    comma-separated ids ("a, b, c"); the second column shows that list
    normalized (spaces stripped) and sorted.
    """

    def __init__(self, template, target, information):
        super(TraceTable, self).__init__(template)
        nodes = self.dom.childNodes
        element = findElementById(nodes, "traceTable")
        for key in sorted(information):
            rowElement = self.dom.createElement("tr")
            cellElement = self.dom.createElement("td")
            cellElement.appendChild(self.dom.createTextNode(key))
            rowElement.appendChild(cellElement)
            cellElement = self.dom.createElement("td")
            # BUG FIX: the ids arrive joined with ", "; after stripping all
            # spaces the separator is plain ",". The old code split on
            # ", ", which after the replace() never matched, so the list
            # came back as a single element and was never actually sorted.
            idList = information[key].replace(" ", "").split(",")
            informationString = ",".join(sorted(idList))
            cellElement.appendChild(self.dom.createTextNode(informationString))
            rowElement.appendChild(cellElement)
            element.appendChild(rowElement)
        # Write the populated table out (context manager guarantees close).
        with open(target, "w") as targetFile:
            targetFile.write(self.dom.toxml())
def main():
    """Build requirement<->test-case trace tables from a book file.

    ``argv[1]`` names a file listing one section per line. Each section
    becomes an Objective; the requirement ids referenced by each test are
    cross-indexed in both directions and rendered into two HTML tables
    (requirement -> tests and test -> requirements).
    """
    bookName = str(sys.argv[1])
    sections = []
    objectives = {}
    requirements = Requirements()
    # FIX: read via a context manager so the book file is always closed
    # (the original left the handle open).
    with open(bookName, "r") as bookFile:
        for rawLine in bookFile:
            line = rawLine.strip()
            sections.append(line)
            objectives[line] = Objective(line, requirements)
    # Cross-index: requirement -> "t1, t2, ..." and test -> "r1, r2, ...".
    requirementsToTestCase = {}
    testCaseToRequirements = {}
    for category in objectives:
        for testID in objectives[category].requirementsInThisTest:
            for requirementID in objectives[category].getTestRequirementIDs(testID):
                if requirementID in requirementsToTestCase:
                    requirementsToTestCase[requirementID] += ", " + testID
                else:
                    requirementsToTestCase[requirementID] = testID
                if testID in testCaseToRequirements:
                    testCaseToRequirements[testID] += ", " + requirementID
                else:
                    testCaseToRequirements[testID] = requirementID
    TraceTable("../tools_configuration/REQ_TEST_ID_TEMPLATE.html", "req_test_id.html", requirementsToTestCase)
    TraceTable("../tools_configuration/TEST_ID_REQ_TEMPLATE.html", "test_id_req.html", testCaseToRequirements)


if __name__ == "__main__":
    main()
|
{
"content_hash": "f1732cb13c1343b292d8566e9aac2a0d",
"timestamp": "",
"source": "github",
"line_count": 84,
"max_line_length": 113,
"avg_line_length": 31.928571428571427,
"alnum_prop": 0.6517524235645041,
"repo_name": "mbroihier/tfw",
"id": "ad2e3a5e4c88e65340bad8462d5880d93501c8ec",
"size": "2701",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tools_python3/buildHTMLTraceTables.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "413533"
},
{
"name": "CSS",
"bytes": "161"
},
{
"name": "HTML",
"bytes": "2204"
},
{
"name": "Java",
"bytes": "41587"
},
{
"name": "JavaScript",
"bytes": "5398"
},
{
"name": "Python",
"bytes": "47535"
}
],
"symlink_target": ""
}
|
import uuid
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_exceptions import CloudError
from msrest.polling import LROPoller, NoPolling
from msrestazure.polling.arm_polling import ARMPolling
from .. import models
class PacketCapturesOperations(object):
"""PacketCapturesOperations operations.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
:ivar api_version: Client API version. Constant value: "2017-11-01".
"""
models = models
    def __init__(self, client, config, serializer, deserializer):
        """Store the service client, msrest (de)serializers and config."""
        # Pipeline client used to format URLs and send HTTP requests.
        self._client = client
        # msrest Serializer/Deserializer pair for request/response bodies.
        self._serialize = serializer
        self._deserialize = deserializer
        # Client API version. Constant value: "2017-11-01".
        self.api_version = "2017-11-01"
        self.config = config
def _create_initial(
self, resource_group_name, network_watcher_name, packet_capture_name, parameters, custom_headers=None, raw=False, **operation_config):
# Construct URL
url = self.create.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'packetCaptureName': self._serialize.url("packet_capture_name", packet_capture_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(parameters, 'PacketCapture')
# Construct and send request
request = self._client.put(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, stream=False, **operation_config)
if response.status_code not in [201]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 201:
deserialized = self._deserialize('PacketCaptureResult', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def create(
self, resource_group_name, network_watcher_name, packet_capture_name, parameters, custom_headers=None, raw=False, polling=True, **operation_config):
"""Create and start a packet capture on the specified VM.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_watcher_name: The name of the network watcher.
:type network_watcher_name: str
:param packet_capture_name: The name of the packet capture session.
:type packet_capture_name: str
:param parameters: Parameters that define the create packet capture
operation.
:type parameters: ~azure.mgmt.network.v2017_11_01.models.PacketCapture
:param dict custom_headers: headers that will be added to the request
:param bool raw: The poller return type is ClientRawResponse, the
direct response alongside the deserialized response
:param polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:return: An instance of LROPoller that returns PacketCaptureResult or
ClientRawResponse<PacketCaptureResult> if raw==True
:rtype:
~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.network.v2017_11_01.models.PacketCaptureResult]
or
~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[~azure.mgmt.network.v2017_11_01.models.PacketCaptureResult]]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
raw_result = self._create_initial(
resource_group_name=resource_group_name,
network_watcher_name=network_watcher_name,
packet_capture_name=packet_capture_name,
parameters=parameters,
custom_headers=custom_headers,
raw=True,
**operation_config
)
def get_long_running_output(response):
deserialized = self._deserialize('PacketCaptureResult', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
lro_delay = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
if polling is True: polling_method = ARMPolling(lro_delay, **operation_config)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
create.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/packetCaptures/{packetCaptureName}'}
def get(
self, resource_group_name, network_watcher_name, packet_capture_name, custom_headers=None, raw=False, **operation_config):
"""Gets a packet capture session by name.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_watcher_name: The name of the network watcher.
:type network_watcher_name: str
:param packet_capture_name: The name of the packet capture session.
:type packet_capture_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: PacketCaptureResult or ClientRawResponse if raw=true
:rtype: ~azure.mgmt.network.v2017_11_01.models.PacketCaptureResult or
~msrest.pipeline.ClientRawResponse
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = self.get.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'packetCaptureName': self._serialize.url("packet_capture_name", packet_capture_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('PacketCaptureResult', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/packetCaptures/{packetCaptureName}'}
def _delete_initial(
self, resource_group_name, network_watcher_name, packet_capture_name, custom_headers=None, raw=False, **operation_config):
# Construct URL
url = self.delete.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'packetCaptureName': self._serialize.url("packet_capture_name", packet_capture_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.delete(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [202, 204]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def delete(
self, resource_group_name, network_watcher_name, packet_capture_name, custom_headers=None, raw=False, polling=True, **operation_config):
"""Deletes the specified packet capture session.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_watcher_name: The name of the network watcher.
:type network_watcher_name: str
:param packet_capture_name: The name of the packet capture session.
:type packet_capture_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: The poller return type is ClientRawResponse, the
direct response alongside the deserialized response
:param polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:return: An instance of LROPoller that returns None or
ClientRawResponse<None> if raw==True
:rtype: ~msrestazure.azure_operation.AzureOperationPoller[None] or
~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[None]]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
raw_result = self._delete_initial(
resource_group_name=resource_group_name,
network_watcher_name=network_watcher_name,
packet_capture_name=packet_capture_name,
custom_headers=custom_headers,
raw=True,
**operation_config
)
def get_long_running_output(response):
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
lro_delay = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
if polling is True: polling_method = ARMPolling(lro_delay, **operation_config)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/packetCaptures/{packetCaptureName}'}
def _stop_initial(
self, resource_group_name, network_watcher_name, packet_capture_name, custom_headers=None, raw=False, **operation_config):
# Construct URL
url = self.stop.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'packetCaptureName': self._serialize.url("packet_capture_name", packet_capture_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200, 202]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def stop(
self, resource_group_name, network_watcher_name, packet_capture_name, custom_headers=None, raw=False, polling=True, **operation_config):
"""Stops a specified packet capture session.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_watcher_name: The name of the network watcher.
:type network_watcher_name: str
:param packet_capture_name: The name of the packet capture session.
:type packet_capture_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: The poller return type is ClientRawResponse, the
direct response alongside the deserialized response
:param polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:return: An instance of LROPoller that returns None or
ClientRawResponse<None> if raw==True
:rtype: ~msrestazure.azure_operation.AzureOperationPoller[None] or
~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[None]]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
raw_result = self._stop_initial(
resource_group_name=resource_group_name,
network_watcher_name=network_watcher_name,
packet_capture_name=packet_capture_name,
custom_headers=custom_headers,
raw=True,
**operation_config
)
def get_long_running_output(response):
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
lro_delay = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
if polling is True: polling_method = ARMPolling(lro_delay, **operation_config)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
stop.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/packetCaptures/{packetCaptureName}/stop'}
def _get_status_initial(
self, resource_group_name, network_watcher_name, packet_capture_name, custom_headers=None, raw=False, **operation_config):
# Construct URL
url = self.get_status.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'packetCaptureName': self._serialize.url("packet_capture_name", packet_capture_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200, 202]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('PacketCaptureQueryStatusResult', response)
if response.status_code == 202:
deserialized = self._deserialize('PacketCaptureQueryStatusResult', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def get_status(
self, resource_group_name, network_watcher_name, packet_capture_name, custom_headers=None, raw=False, polling=True, **operation_config):
"""Query the status of a running packet capture session.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_watcher_name: The name of the Network Watcher resource.
:type network_watcher_name: str
:param packet_capture_name: The name given to the packet capture
session.
:type packet_capture_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: The poller return type is ClientRawResponse, the
direct response alongside the deserialized response
:param polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:return: An instance of LROPoller that returns
PacketCaptureQueryStatusResult or
ClientRawResponse<PacketCaptureQueryStatusResult> if raw==True
:rtype:
~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.network.v2017_11_01.models.PacketCaptureQueryStatusResult]
or
~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[~azure.mgmt.network.v2017_11_01.models.PacketCaptureQueryStatusResult]]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
raw_result = self._get_status_initial(
resource_group_name=resource_group_name,
network_watcher_name=network_watcher_name,
packet_capture_name=packet_capture_name,
custom_headers=custom_headers,
raw=True,
**operation_config
)
def get_long_running_output(response):
deserialized = self._deserialize('PacketCaptureQueryStatusResult', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
lro_delay = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
if polling is True: polling_method = ARMPolling(lro_delay, **operation_config)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
get_status.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/packetCaptures/{packetCaptureName}/queryStatus'}
def list(
self, resource_group_name, network_watcher_name, custom_headers=None, raw=False, **operation_config):
"""Lists all packet capture sessions within the specified resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_watcher_name: The name of the Network Watcher resource.
:type network_watcher_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: An iterator like instance of PacketCaptureResult
:rtype:
~azure.mgmt.network.v2017_11_01.models.PacketCaptureResultPaged[~azure.mgmt.network.v2017_11_01.models.PacketCaptureResult]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
def internal_paging(next_link=None, raw=False):
if not next_link:
# Construct URL
url = self.list.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(
request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
return response
# Deserialize response
deserialized = models.PacketCaptureResultPaged(internal_paging, self._deserialize.dependencies)
if raw:
header_dict = {}
client_raw_response = models.PacketCaptureResultPaged(internal_paging, self._deserialize.dependencies, header_dict)
return client_raw_response
return deserialized
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/packetCaptures'}
|
{
"content_hash": "e2a860fbd9a13422639cbf5590c9f886",
"timestamp": "",
"source": "github",
"line_count": 539,
"max_line_length": 215,
"avg_line_length": 49.647495361781075,
"alnum_prop": 0.6609118086696562,
"repo_name": "lmazuel/azure-sdk-for-python",
"id": "3aff82b28741ab3d52076c811d29b430f7b887a6",
"size": "27234",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "azure-mgmt-network/azure/mgmt/network/v2017_11_01/operations/packet_captures_operations.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "42572767"
}
],
"symlink_target": ""
}
|
import _plotly_utils.basevalidators
class HoverinfosrcValidator(_plotly_utils.basevalidators.SrcValidator):
    """Validator for the ``hoverinfosrc`` property of ``cone`` traces."""

    def __init__(self, plotly_name="hoverinfosrc", parent_name="cone", **kwargs):
        # The edit type defaults to "none"; an explicit caller value wins.
        kwargs.setdefault("edit_type", "none")
        super(HoverinfosrcValidator, self).__init__(
            plotly_name=plotly_name, parent_name=parent_name, **kwargs
        )
|
{
"content_hash": "53f6d0e82030898f4874915f86eeafc5",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 81,
"avg_line_length": 36.90909090909091,
"alnum_prop": 0.6305418719211823,
"repo_name": "plotly/plotly.py",
"id": "48110ff964a3383ebddd52060509d3b8b0c2bbd4",
"size": "406",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/python/plotly/plotly/validators/cone/_hoverinfosrc.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "545"
},
{
"name": "JavaScript",
"bytes": "2074"
},
{
"name": "PostScript",
"bytes": "565328"
},
{
"name": "Python",
"bytes": "31506317"
},
{
"name": "TypeScript",
"bytes": "71337"
}
],
"symlink_target": ""
}
|
"""
Test cases format for time space networks and jointed dispatch of time and space networks
"""
F_BUS = 0
T_BUS = 1
TIME = 2
BUS_ID = 0
D = 1
LOCATION = 2
|
{
"content_hash": "76011fbdcb8fd027c3f0134bf27cf642",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 89,
"avg_line_length": 14.454545454545455,
"alnum_prop": 0.6729559748427673,
"repo_name": "Matrixeigs/Optimization",
"id": "bef822ae825747ead82cc60bffe234ad61dd7237",
"size": "159",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "transportation_systems/test_cases/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "926917"
}
],
"symlink_target": ""
}
|
from .sub_resource import SubResource
class ApplicationGatewayBackendHttpSettings(SubResource):
    """Backend HTTP settings of an application gateway.

    :param id: Resource Identifier.
    :type id: str
    :param port: Port.
    :type port: int
    :param protocol: Protocol. Possible values include: 'Http', 'Https'.
    :type protocol: str or
     ~azure.mgmt.network.v2015_06_15.models.ApplicationGatewayProtocol
    :param cookie_based_affinity: Cookie based affinity. Possible values
     include: 'Enabled', 'Disabled'.
    :type cookie_based_affinity: str or
     ~azure.mgmt.network.v2015_06_15.models.ApplicationGatewayCookieBasedAffinity
    :param request_timeout: Request timeout in seconds. Acceptable values
     range from 1 to 86400; the gateway fails the request if no response
     arrives within this window.
    :type request_timeout: int
    :param probe: Probe resource of the application gateway.
    :type probe: ~azure.mgmt.network.v2015_06_15.models.SubResource
    :param provisioning_state: Provisioning state of the backend HTTP
     settings resource (Updating/Deleting/Failed).
    :type provisioning_state: str
    :param name: Resource name, unique within the resource group; can be
     used to access the resource.
    :type name: str
    :param etag: A unique read-only string that changes whenever the
     resource is updated.
    :type etag: str
    """

    # Maps attribute names onto the REST payload; per-setting values live
    # under the 'properties' envelope of the serialized resource.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'port': {'key': 'properties.port', 'type': 'int'},
        'protocol': {'key': 'properties.protocol', 'type': 'str'},
        'cookie_based_affinity': {
            'key': 'properties.cookieBasedAffinity', 'type': 'str'},
        'request_timeout': {'key': 'properties.requestTimeout', 'type': 'int'},
        'probe': {'key': 'properties.probe', 'type': 'SubResource'},
        'provisioning_state': {
            'key': 'properties.provisioningState', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'etag': {'key': 'etag', 'type': 'str'},
    }

    def __init__(self, id=None, port=None, protocol=None,
                 cookie_based_affinity=None, request_timeout=None,
                 probe=None, provisioning_state=None, name=None, etag=None):
        # The resource id is handled by the SubResource base class.
        super(ApplicationGatewayBackendHttpSettings, self).__init__(id=id)
        self.port = port
        self.protocol = protocol
        self.cookie_based_affinity = cookie_based_affinity
        self.request_timeout = request_timeout
        self.probe = probe
        self.provisioning_state = provisioning_state
        self.name = name
        self.etag = etag
|
{
"content_hash": "8d800074fbcf9a0c2936d9cab07b7b1f",
"timestamp": "",
"source": "github",
"line_count": 57,
"max_line_length": 167,
"avg_line_length": 47.75438596491228,
"alnum_prop": 0.6734019103600294,
"repo_name": "AutorestCI/azure-sdk-for-python",
"id": "5c5fb92a3e4c0e02626ee38886e7ec00512f055b",
"size": "3196",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "azure-mgmt-network/azure/mgmt/network/v2015_06_15/models/application_gateway_backend_http_settings.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "34619070"
}
],
"symlink_target": ""
}
|
"""The backups api."""
import webob
from webob import exc
from cinder.api import common
from cinder.api import extensions
from cinder.api.openstack import wsgi
from cinder.api.views import backups as backup_views
from cinder.api import xmlutil
from cinder import backup as backupAPI
from cinder import exception
from cinder.openstack.common import log as logging
from cinder import utils
LOG = logging.getLogger(__name__)
def make_backup(elem):
    """Declare every serializable backup attribute on an XML template element."""
    for attr in ('id', 'status', 'size', 'container', 'volume_id',
                 'object_count', 'availability_zone', 'created_at',
                 'name', 'description', 'fail_reason'):
        elem.set(attr)
def make_backup_restore(elem):
    """Declare the restore-response attributes on an XML template element."""
    for attr in ('backup_id', 'volume_id'):
        elem.set(attr)
class BackupTemplate(xmlutil.TemplateBuilder):
    """Builds the XML master template for a single backup resource."""

    def construct(self):
        # The root element is selected from the 'backup' key of the response.
        root = xmlutil.TemplateElement('backup', selector='backup')
        make_backup(root)
        return xmlutil.MasterTemplate(
            root, 1, nsmap={Backups.alias: Backups.namespace})
class BackupsTemplate(xmlutil.TemplateBuilder):
    """Builds the XML master template for a list of backups."""

    def construct(self):
        root = xmlutil.TemplateElement('backups')
        # One <backup> sub-element per entry under the 'backups' key.
        entry = xmlutil.SubTemplateElement(root, 'backup', selector='backups')
        make_backup(entry)
        return xmlutil.MasterTemplate(
            root, 1, nsmap={Backups.alias: Backups.namespace})
class BackupRestoreTemplate(xmlutil.TemplateBuilder):
    """Builds the XML master template for a restore response."""

    def construct(self):
        # The root element is selected from the 'restore' key of the response.
        root = xmlutil.TemplateElement('restore', selector='restore')
        make_backup_restore(root)
        return xmlutil.MasterTemplate(
            root, 1, nsmap={Backups.alias: Backups.namespace})
class CreateDeserializer(wsgi.MetadataXMLDeserializer):
    """Deserializes an XML backup-create request body into a dict."""

    def default(self, string):
        dom = utils.safe_minidom_parse_string(string)
        return {'body': {'backup': self._extract_backup(dom)}}

    def _extract_backup(self, node):
        # Pick up only the attributes actually present on the <backup> node.
        backup_node = self.find_first_child_named(node, 'backup')
        attributes = ('container', 'display_name',
                      'display_description', 'volume_id')
        return dict((attr, backup_node.getAttribute(attr))
                    for attr in attributes
                    if backup_node.getAttribute(attr))
class RestoreDeserializer(wsgi.MetadataXMLDeserializer):
    """Deserializes an XML backup-restore request body into a dict."""

    def default(self, string):
        dom = utils.safe_minidom_parse_string(string)
        return {'body': {'restore': self._extract_restore(dom)}}

    def _extract_restore(self, node):
        # Only 'volume_id' is honoured, and only when it is non-empty.
        restore_node = self.find_first_child_named(node, 'restore')
        volume_id = restore_node.getAttribute('volume_id')
        return {'volume_id': volume_id} if volume_id else {}
class BackupsController(wsgi.Controller):
"""The Backups API controller for the OpenStack API."""
_view_builder_class = backup_views.ViewBuilder
    def __init__(self):
        # Service-layer facade used by every handler in this controller.
        self.backup_api = backupAPI.API()
        super(BackupsController, self).__init__()
@wsgi.serializers(xml=BackupTemplate)
def show(self, req, id):
"""Return data about the given backup."""
LOG.debug(_('show called for member %s'), id)
context = req.environ['cinder.context']
try:
backup = self.backup_api.get(context, backup_id=id)
except exception.BackupNotFound as error:
raise exc.HTTPNotFound(explanation=error.msg)
return self._view_builder.detail(req, backup)
    def delete(self, req, id):
        """Delete a backup.

        Returns 202 Accepted; completion of the delete is not awaited here.
        """
        LOG.debug(_('delete called for member %s'), id)
        context = req.environ['cinder.context']
        LOG.audit(_('Delete backup with id: %s'), id, context=context)

        # Map service-layer failures onto the matching HTTP errors.
        try:
            self.backup_api.delete(context, id)
        except exception.BackupNotFound as error:
            raise exc.HTTPNotFound(explanation=error.msg)
        except exception.InvalidBackup as error:
            raise exc.HTTPBadRequest(explanation=error.msg)
        return webob.Response(status_int=202)
    @wsgi.serializers(xml=BackupsTemplate)
    def index(self, req):
        """Returns a summary list of backups."""
        # Summary view: shared listing logic with is_detail=False.
        return self._get_backups(req, is_detail=False)
    @wsgi.serializers(xml=BackupsTemplate)
    def detail(self, req):
        """Returns a detailed list of backups."""
        # Detailed view: shared listing logic with is_detail=True.
        return self._get_backups(req, is_detail=True)
def _get_backups(self, req, is_detail):
"""Returns a list of backups, transformed through view builder."""
context = req.environ['cinder.context']
backups = self.backup_api.get_all(context)
limited_list = common.limited(backups, req)
if is_detail:
backups = self._view_builder.detail_list(req, limited_list)
else:
backups = self._view_builder.summary_list(req, limited_list)
return backups
# TODO(frankm): Add some checks here including
# - whether requested volume_id exists so we can return some errors
#   immediately
# - maybe also do validation of swift container name
@wsgi.response(202)
@wsgi.serializers(xml=BackupTemplate)
@wsgi.deserializers(xml=CreateDeserializer)
def create(self, req, body):
    """Create a new backup.

    The request body must carry a ``backup`` object with at least a
    ``volume_id``; ``container``, ``name`` and ``description`` are
    optional.
    """
    LOG.debug(_('Creating new backup %s'), body)
    if not self.is_valid_body(body, 'backup'):
        raise exc.HTTPBadRequest()
    ctxt = req.environ['cinder.context']
    try:
        backup = body['backup']
        volume_id = backup['volume_id']
    except KeyError:
        raise exc.HTTPBadRequest(explanation=_("Incorrect request body format"))
    container = backup.get('container', None)
    name = backup.get('name', None)
    description = backup.get('description', None)
    LOG.audit(_("Creating backup of volume %(volume_id)s in container"
                " %(container)s"),
              {'volume_id': volume_id, 'container': container},
              context=ctxt)
    try:
        new_backup = self.backup_api.create(ctxt, name, description,
                                            volume_id, container)
    except exception.InvalidVolume as error:
        raise exc.HTTPBadRequest(explanation=error.msg)
    except exception.VolumeNotFound as error:
        raise exc.HTTPNotFound(explanation=error.msg)
    except exception.ServiceNotFound as error:
        raise exc.HTTPInternalServerError(explanation=error.msg)
    return self._view_builder.summary(req, dict(new_backup.iteritems()))
@wsgi.response(202)
@wsgi.serializers(xml=BackupRestoreTemplate)
@wsgi.deserializers(xml=RestoreDeserializer)
def restore(self, req, id, body):
    """Restore an existing backup to a volume.

    The body must carry a ``restore`` object; its optional ``volume_id``
    selects the target volume.
    """
    LOG.debug(_('Restoring backup %(backup_id)s (%(body)s)'),
              {'backup_id': id, 'body': body})
    if not self.is_valid_body(body, 'restore'):
        raise exc.HTTPBadRequest()
    ctxt = req.environ['cinder.context']
    try:
        restore = body['restore']
    except KeyError:
        raise exc.HTTPBadRequest(explanation=_("Incorrect request body format"))
    volume_id = restore.get('volume_id', None)
    LOG.audit(_("Restoring backup %(backup_id)s to volume %(volume_id)s"),
              {'backup_id': id, 'volume_id': volume_id},
              context=ctxt)
    # The exception types are disjoint, so grouping them by the HTTP
    # status they map to is behaviourally identical to one clause each.
    try:
        new_restore = self.backup_api.restore(ctxt,
                                              backup_id=id,
                                              volume_id=volume_id)
    except (exception.InvalidInput,
            exception.InvalidVolume,
            exception.InvalidBackup) as error:
        raise exc.HTTPBadRequest(explanation=error.msg)
    except (exception.BackupNotFound,
            exception.VolumeNotFound) as error:
        raise exc.HTTPNotFound(explanation=error.msg)
    except (exception.VolumeSizeExceedsAvailableQuota,
            exception.VolumeLimitExceeded) as error:
        raise exc.HTTPRequestEntityTooLarge(
            explanation=error.msg, headers={'Retry-After': 0})
    return self._view_builder.restore_summary(
        req, dict(new_restore.iteritems()))
class Backups(extensions.ExtensionDescriptor):
    """Backups support."""

    name = 'Backups'
    alias = 'backups'
    namespace = 'http://docs.openstack.org/volume/ext/backups/api/v1'
    updated = '2012-12-12T00:00:00+00:00'

    def get_resources(self):
        """Expose /backups with a detail collection and a restore action."""
        resource = extensions.ResourceExtension(
            Backups.alias, BackupsController(),
            collection_actions={'detail': 'GET'},
            member_actions={'restore': 'POST'})
        return [resource]
|
{
"content_hash": "e00d29f37f0e701b2a509c24ad239361",
"timestamp": "",
"source": "github",
"line_count": 268,
"max_line_length": 78,
"avg_line_length": 35.42537313432836,
"alnum_prop": 0.627343585422372,
"repo_name": "ntt-sic/cinder",
"id": "e284baa29ddf1eb8210cddbba9ada3c7b2a0ce35",
"size": "10155",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "cinder/api/contrib/backups.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "5200214"
},
{
"name": "Shell",
"bytes": "8994"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
import logging
import sys
import os
import re
import shutil
from . import errors
from .tvdb import TVDB
log = logging.getLogger('Core')
class Episode(object):
    """A single episode number belonging to a parent :class:`File`."""

    def __init__(self, file_, number):
        self.file_ = file_  # cache reverse reference to parent object
        self.number = number

    def __getattr__(self, name):
        # Give an actionable error when the episode number was never
        # extracted from the filename nor supplied by the user.
        if name == 'episode':
            msg = 'Missing episode: Set it with --episode or use %e in your --regex string'
            raise AttributeError(msg)
        msg = "'{}' object has no attribute '{}'".format(self.__class__.__name__, name)
        raise AttributeError(msg)

    def __getattribute__(self, item):
        """
        Allow the retrieval of the episode number zero-padded to two
        digits through the virtual ``episode_2`` attribute.
        """
        # FIX: '==' instead of 'is' -- identity comparison with a string
        # literal is implementation dependent and a SyntaxWarning on
        # Python >= 3.8.
        if item == 'episode_2':
            # FIX: zfill keeps already-two-digit numbers intact; the old
            # '0{}' prefix produced e.g. '010' for episode 10.
            return str(self.number).zfill(2)
        return object.__getattribute__(self, item)

    def __int__(self):
        return int(self.number)

    def __repr__(self):
        return 'Episode: {} (season {})'.format(self.number, self.file_.season)

    def __str__(self):
        # NOTE(review): relies on a `title` attribute being assigned
        # externally (e.g. after a TVDB lookup) -- confirm against callers.
        return '{} - {}'.format(self.number, self.title)
class File(object):
    """Everything known about one media file that is being renamed."""

    # Default output format; overridable via set_output_format().
    output_format = '%n - %s%e - %t%x'

    def __init__(self, show_name=None, season=None, episodes=(), extension=''):
        self.show_name = show_name
        self.season = season
        self.episodes = [Episode(file_=self, number=i) for i in episodes]
        self.extension = extension

    def __repr__(self):
        return self.name

    def get_episode_output(self, filename, marker='%e', fill=2):
        """Replace the %e (or %e{n}) marker with the padded episode number(s)."""
        if '%e{' in filename:
            # User specified a custom pad width, e.g. %e{3}.
            fill = filename.split('%e{')[1][:1]
            marker = '%e{' + fill + '}'
        # Multi-episode files are joined with '-', e.g. '01-02'.
        episode = '-'.join([str(e.number).zfill(int(fill)) for e in self.episodes])
        return filename.replace(marker, episode)

    def get_season_output(self, filename, marker='%s', fill=1):
        """Replace the %s (or %s{n}) marker with the padded season number."""
        if '%s{' in filename:
            fill = filename.split('%s{')[1][:1]
            marker = '%s{' + fill + '}'
        season = str(self.season).zfill(int(fill))
        return filename.replace(marker, season)

    @property
    def name(self):
        """The fully formatted output filename for this file."""
        filename = self.output_format
        filename = filename.replace('%n', self.show_name)
        filename = filename.replace('%t', self.title)
        filename = filename.replace('%x', self.extension)
        filename = self.get_season_output(filename)
        filename = self.get_episode_output(filename)
        return filename

    @property
    def title(self):
        """Joined episode titles, collapsing identical '(n)' multi-part titles."""
        titles = [e.title for e in self.episodes]
        # Check the titles aren't all the same with different (x) parts
        suffixes = tuple('({})'.format(i + 1) for i in range(len(titles)))
        if any([t.endswith(suffixes) for t in titles]):
            stripped_titles = set([t[:-4] for t in titles])
            # FIX: '==' instead of 'is' -- identity comparison with an int
            # literal is implementation dependent.
            if len(stripped_titles) == 1:
                titles = stripped_titles
        return ' & '.join(titles)

    def safety_check(self):
        """
        Check we have all the necessary information to rename a file.
        """
        if self.show_name is None:
            raise errors.MissingInformationException('A show name')
        if self.season is None:
            raise errors.MissingInformationException('A season number')
        if not self.episodes:
            raise errors.MissingInformationException('An episode number')
        for e in self.episodes:
            if e.number is None:
                raise errors.MissingInformationException('An episode number')

    def set_output_format(self, user_format):
        self.output_format = user_format

    def user_overrides(self, show_name, season, episode):
        """Apply command-line overrides on top of the extracted details."""
        if show_name:
            self.show_name = show_name
        if season:
            self.season = int(season)
        if episode:
            if self.episodes:
                # NOTE: overrides *every* episode number with the same value,
                # matching the original behaviour.
                for e in self.episodes:
                    e.number = int(episode)
            else:
                self.episodes = [Episode(file_=self, number=episode)]
class TvRenamr(object):
    """Drives the rename pipeline: parse a filename, look the episode up
    on The TvDb, build the destination path and move the file.
    """

    def __init__(self, working_dir, debug=False, dry=False, cache=True):
        """
        :param working_dir: directory containing the files being renamed.
        :param debug: log-only mode -- no filesystem changes are made.
        :param dry: report what would happen without touching the disk.
        :param cache: whether TVDB lookups may be served from cache.
        """
        self.cache = cache
        self.working_dir = working_dir
        self.dry = dry
        self.debug = debug

    def remove_part_from_multiple_episodes(self, show_name):
        """Remove the string "Part " from a filename.

        In episode titles of multi-part episodes that use the format
        (Part n) remove the 'Part ' section so the format is (n).
        """
        log.debug('Removing Part from episode name')
        return show_name.replace('(Part ', '(')

    def extract_details_from_file(self, fn, user_regex=None, partial=False):
        """Using a regular expression extract information from the filename passed in.

        Looks at the file given and extracts from it the show title, it's
        season number and episode number using regular expression magic.
        The default formats accepted are: series.0x00.xxx or series.s0e00.xxx
        A user can specify their own regular expression for a format not
        already covered.

        :raises errors.UnexpectedFormatException: if the filename does not
            match the (default or user supplied) regular expression.
        """
        try:
            fn = fn.decode('utf-8')
        except AttributeError:  # python 3
            pass
        fn = self._sanitise_filename(fn)
        log.log(22, 'Renaming: %s', fn)
        # If we sanitise the filename we shall sanitise the regex too
        if user_regex is not None:
            user_regex = self._sanitise_filename(user_regex)
        regex = self._build_regex(user_regex, partial=partial)
        log.debug('Attempting rename with: {}'.format(regex))
        matches = re.match(regex, fn)
        if not matches:
            raise errors.UnexpectedFormatException(fn)
        return self._build_credentials(fn, matches)

    def retrieve_episode_title(self, episode, canonical=None, override=None):
        """Retrieves the title of a given episode.

        The series name, season and episode numbers must be specified to get
        the episode's title.

        :param canonical: overrides the show name used for the lookup.
        :param override: returned instead of the looked-up title if set.
        """
        if canonical is not None:
            episode.file_.show_name = canonical
        log.debug('Show Name: %s', episode.file_.show_name)
        args = (episode.file_.show_name, episode.file_.season, episode.number, self.cache)
        self.lookup = TVDB(*args)  # assign to self for use in format_show_name
        log.info('Episode: %s', self.lookup.title)
        return override or self.lookup.title

    def format_show_name(self, show_name, the=False):
        """Return the final show name, optionally moving a leading 'The'."""
        if show_name is None:
            show_name = self.lookup.show
            log.debug('Using the formatted show name retrieved from The TvDb')
        else:
            log.debug('Using config output name: %s', show_name)
        if the is True:
            show_name = self._move_leading_the_to_trailing_the(show_name)
        log.debug('Final show name: %s', show_name)
        return show_name

    def build_path(self, _file, rename_dir, organise=False, specials_folder=None):
        """Build the full destination path and filename of the renamed file.

        By default the format is:

        Show Name - Season NumberEpisode Number - Episode Title.format.

        Builds the new path for the file to be renamed to, by default this is
        the working directory. Users can specify a directory to move files to
        once renamed using the renamed_dir option. The auto_move option can be
        used to specify a top level directory where files will be placed in
        season and show folders, i.e. Show/Season 1/episodes
        """
        if organise is True:
            args = [rename_dir, _file.show_name, _file.season, specials_folder]
            rename_dir = self._build_organise_path(*args)
        log.log(22, 'Directory: %s', rename_dir)
        path = os.path.join(rename_dir, _file.name)
        log.debug('Full path: %s', path)
        return path

    def rename(self, current_filepath, destination_filepath, copy=False, symlink=False):
        """Renames a file.

        This is more akin to the UNIX `mv` operation as the destination filepath
        can be anywhere on the filesystem.

        Returns the new filename for use elsewhere.

        :raises errors.PathExistsException: if the destination already exists.
        """
        if os.path.exists(destination_filepath):
            raise errors.PathExistsException(destination_filepath)
        log.debug(os.path.join(self.working_dir, current_filepath))
        log.debug(destination_filepath)
        # In dry/debug mode only log -- never touch the filesystem.
        if not self.dry and not self.debug:
            source_filepath = os.path.join(self.working_dir, current_filepath)
            if copy:
                shutil.copy(source_filepath, destination_filepath)
            elif symlink:
                # os.symlink doesn't work on windows with python < 3.3
                if os.name == 'posix' or sys.version_info >= (3, 3):
                    os.symlink(source_filepath, destination_filepath)
                elif os.name == 'nt':
                    # Windows + Python 2 fallback via the Win32 API.
                    import ctypes
                    source_filepath = source_filepath.decode('UTF-8')
                    kernel_dll = ctypes.windll.LoadLibrary("kernel32.dll")
                    kernel_dll.CreateSymbolicLinkW(destination_filepath,
                                                   source_filepath, 0)
            else:
                shutil.move(source_filepath, destination_filepath)
        destination_file = os.path.split(destination_filepath)[1]
        log.log(26, 'Renamed: "%s"', destination_file)
        return destination_filepath

    def _build_credentials(self, fn, matches):
        """Build a dictionary of a file's extracted credentials."""
        details = {}
        try:
            details['show_name'] = matches.group('show_name').replace('.', ' ').strip()
        except IndexError:
            pass
        try:
            details['season'] = str(int(matches.group('season')))
        except IndexError:
            pass
        episodes = []
        for group in ('episode', 'episode2'):
            try:
                episodes.append(str(int(matches.group(group))))
            except (IndexError, KeyError, TypeError):
                # Group absent from the regex or did not participate.
                pass
        details.update({
            'episodes': episodes,
            'extension': os.path.splitext(fn)[1]
        })
        msg = ', '.join('{}: {}'.format(key, value) for key, value in details.items())
        log.debug('Filename yielded: %s', msg)
        return details

    def _build_organise_path(self, start_path, show_name, season_number, specials=None):
        """
        Constructs a directory path using the show's details.

        Show name and season number of an episode dictate the folder structure.
        """
        season = 'Season {}'.format(season_number)
        # FIX: '==' instead of 'is' -- identity comparison with an int
        # literal is implementation dependent (CPython small-int caching).
        if season_number == 0 and specials is not None:  # specials folder
            season = specials
        path = os.path.join(start_path, show_name, season)
        if not (os.path.exists(path) or self.dry or self.debug):
            os.makedirs(path)
            log.debug('Directories created for path: ' + path)
        return path

    def _build_regex(self, regex=None, partial=False):
        r"""Builds the regular expression to extract a files details.

        Custom syntax can be used in the regular expression to help specify
        parts of the episode's file name. These custom syntax snippets are
        replaced by the regular expression blocks show.

        %n - [\w\s.,_-]+ - The show name.
        %s - \d{1,2} - The season number.
        %e - \d{2} - The episode number.

        :raises errors.IncorrectRegExpException: if a non-partial custom
            regex contains none of the %n/%s/%e markers.
        """
        series = r"(?P<show_name>[\w\s\(\).',_-]+)"
        season = r"(?P<season>\d{1,2})"
        episode = r"(?P<episode>\d{2})"
        second_episode = r".E?(?P<episode2>\d{2})*"
        if regex is None:
            # Build default regex
            return series + r"\.[Ss]?" + season + r"[XxEe]?" + episode + second_episode
        if not partial and not ('%n' in regex or '%s' in regex or '%e' in regex):
            raise errors.IncorrectRegExpException(regex)
        # series name
        regex = regex.replace('%n', series)
        # season number
        # %s{n}
        if '%s{' in regex:
            log.debug('Season digit number found')
            r = regex.split('%s{')[1][:1]
            log.debug('Specified %s season digits', r)
            s = season.replace('1,2', r)
            regex = regex.replace('%s{' + r + '}', s)
            log.debug('Season regex set: %s', s)
        # %s
        if '%s' in regex:
            regex = regex.replace('%s', season)
            log.debug('Default season regex set: %s', regex)
        # episode number
        # %e{n}
        if '%e{' in regex:
            log.debug('User set episode digit number found')
            r = regex.split('%e{')[1][:1]
            log.debug('User specified %s episode digits', r)
            e = episode.replace('2', r)
            regex = regex.replace('%e{' + r + '}', e)
            log.debug('Episode regex set: %s', e)
        # %e
        if '%e' in regex:
            regex = regex.replace('%e', episode)
            log.debug('Default episode regex set: %s', regex)
        return regex

    def _move_leading_the_to_trailing_the(self, show_name):
        """Moves the leading 'The' of a show name to a trailing 'The'.

        A comma and space are added before the new 'The'.
        """
        if not show_name.startswith('The '):
            return show_name
        log.debug("Moving leading 'The' to end of: %s", show_name)
        return show_name[4:] + ', The'

    def _sanitise_filename(self, filename):
        """
        Remove bits of the filename that cause a problem.

        Initially added to deal specifically with the issues 720[p] causes
        in filenames by appearing before or after the season/episode block.
        """
        items = (
            ('[', '.'),
            ('_', '.'),
            (' ', '.'),
            ('.720p', ''),
            ('.720', ''),
            ('.1080p', ''),
            ('.1080', ''),
            ('.H.264', ''),
            ('.h.264', ''),
        )
        # Order matters: separators are normalised to '.' first so the
        # '.720p'-style patterns then match.
        for target, replacement in items:
            filename = filename.replace(target, replacement)
        return filename
|
{
"content_hash": "c0fad67de0a5615a1a33231ce2bee9dd",
"timestamp": "",
"source": "github",
"line_count": 409,
"max_line_length": 91,
"avg_line_length": 34.696821515892424,
"alnum_prop": 0.5734620534141357,
"repo_name": "ghickman/tvrenamr",
"id": "b1b93b6e6a1bdc2351ade53431ecd47ae9cc1d11",
"size": "14191",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tvrenamr/main.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "351"
},
{
"name": "Python",
"bytes": "66354"
}
],
"symlink_target": ""
}
|
from django.contrib import admin
# Issue 182: six no longer included with Django 3.0
try:
from django.utils import six
except ImportError:
import six
from django.utils.translation import ugettext_lazy as _
from django.template.loader import render_to_string
from avatar.models import Avatar
from avatar.signals import avatar_updated
from avatar.utils import get_user_model
class AvatarAdmin(admin.ModelAdmin):
    """Admin configuration for Avatar objects with an inline preview."""

    list_display = ('get_avatar', 'user', 'primary', "date_uploaded")
    list_filter = ('primary',)
    search_fields = ('user__%s' % getattr(get_user_model(), 'USERNAME_FIELD', 'username'),)
    list_per_page = 50

    def get_avatar(self, avatar_in):
        # Render the avatar thumbnail template for the change list column.
        context = {
            'user': avatar_in.user,
            'url': avatar_in.avatar.url,
            'alt': six.text_type(avatar_in.user),
            'size': 80,
        }
        return render_to_string('avatar/avatar_tag.html', context)

    get_avatar.short_description = _('Avatar')
    get_avatar.allow_tags = True

    def save_model(self, request, obj, form, change):
        # Persist first, then notify listeners that the avatar changed.
        super(AvatarAdmin, self).save_model(request, obj, form, change)
        avatar_updated.send(sender=Avatar, user=request.user, avatar=obj)


admin.site.register(Avatar, AvatarAdmin)
|
{
"content_hash": "deb39c6d8ea6b8767de9bbe300d75546",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 91,
"avg_line_length": 33,
"alnum_prop": 0.671451355661882,
"repo_name": "jezdez/django-avatar",
"id": "a47f74b2da5f323fba936f520046ff5c901aefdd",
"size": "1254",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "avatar/admin.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "2852"
},
{
"name": "Makefile",
"bytes": "377"
},
{
"name": "Python",
"bytes": "41299"
}
],
"symlink_target": ""
}
|
"""
Ce module permet de créer des commandes pour faire agir les robots.
Des fonctions utilitaire permettent de transformer une commande de
Position (Pose) en une commande de vitesse.
L'embarqué et le simulateur utilise un vecteur de vitesse (Pose) pour
contrôler les robots.
"""
from abc import abstractmethod
import threading
from pyhermes import McuCommunicator
from RULEngine.Game.OurPlayer import OurPlayer
from RULEngine.Util.Pose import Pose
class Command(object):
    """Base class for robot commands translated into MCU calls."""

    def __init__(self, player: OurPlayer):
        # Fail fast on anything but an OurPlayer.
        # NOTE(review): `assert` is stripped under `python -O`; this check
        # disappears in optimized runs.
        assert isinstance(player, OurPlayer)
        self.player = player
        # fixme Why does command need the speed???
        # cmd_repr is only set when an AI command exists; subclasses that
        # read it assume the player currently carries an ai_command.
        if player.ai_command is not None:
            self.cmd_repr = player.ai_command.speed

    @abstractmethod
    def package_command(self, mcu_communicator: McuCommunicator):
        # Subclasses translate themselves into one or more pyhermes calls.
        pass
class ResponseCommand(Command):
    """A command whose caller blocks until the robot's answer arrives.

    The calling thread waits on ``pause_cond``; the communication layer
    calls :meth:`wakeup_thread` once the response is available.
    """

    def __init__(self, player: OurPlayer, pause_cond: threading.Condition):
        super().__init__(player)
        self.pause_cond = pause_cond
        # Set to True once the response arrived, so a pause_thread() call
        # that races the wakeup does not wait forever.
        self.completed = False

    def wakeup_thread(self):
        # We don't want wake up
        with self.pause_cond:
            self.completed = True
            self.pause_cond.notify()

    def pause_thread(self):
        # Checking `completed` while holding the lock avoids losing a
        # notify that fired before we started waiting.
        with self.pause_cond:
            if not self.completed:
                self.pause_cond.wait()

    def package_command(self, mcu_communicator: McuCommunicator):
        pass
class GetBattery(ResponseCommand):
    """Query a robot's battery level and wait for the reply."""

    def __init__(self, player, pause_cond: threading.Condition):
        super().__init__(player, pause_cond)

    def package_command(self, mcu_communicator: McuCommunicator):
        battery_reading = mcu_communicator.getBatterie(self.player.id)
        return battery_reading
class Move(Command):
    """Send the cached speed command (vx, vy, omega) to a robot."""

    def __init__(self, player: OurPlayer):
        super().__init__(player)

    def package_command(self, mcu_communicator: McuCommunicator) -> None:
        speed = self.cmd_repr
        mcu_communicator.sendSpeed(self.player.id,
                                   speed.position.x,
                                   speed.position.y,
                                   speed.orientation)
class Kick(Command):
    """Trigger a robot's kicker."""

    def __init__(self, player: OurPlayer):
        super().__init__(player)
        # TODO ask embedded for kick force integration MGL 2017/05/29
        self.kick_speed = player.ai_command.kick_strength

    def package_command(self, mcu_communicator: McuCommunicator) -> None:
        mcu_communicator.kick(self.player.id)
class Stop(Command):
    """Command a robot to halt by zeroing its speed vector."""

    def __init__(self, player: OurPlayer):
        super().__init__(player)
        self.speed_repr = Pose()

    def package_command(self, mcu_communicator: McuCommunicator) -> None:
        # A null speed on every axis stops the robot.
        mcu_communicator.sendSpeed(self.player.id, 0, 0, 0)
class StartChargingKick(Command):
    """Begin charging a robot's kicker capacitor."""

    def __init__(self, player: OurPlayer):
        super().__init__(player)

    def package_command(self, mcu_communicator: McuCommunicator) -> None:
        mcu_communicator.charge(self.player.id)
class Dribbler(Command):
    """Turn a robot's dribbler on or off."""

    def __init__(self, player: OurPlayer, activate: bool=True):
        super().__init__(player)
        self.activate = activate
        # todo ask embedded about dribbler strength MGL 2017/05/29

    def package_command(self, mcu_communicator: McuCommunicator) -> None:
        toggle = (mcu_communicator.turnOnDribbler if self.activate
                  else mcu_communicator.turnOffDribbler)
        toggle(self.player.id)
|
{
"content_hash": "1450eceb22dba459f72342b4a50c5394",
"timestamp": "",
"source": "github",
"line_count": 107,
"max_line_length": 75,
"avg_line_length": 32.16822429906542,
"alnum_prop": 0.647588611272516,
"repo_name": "MaximeGLegault/StrategyIA",
"id": "111d68d0ac809a941dc3c6b9c0341171f4deb874",
"size": "3482",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "RULEngine/Command/command.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "207240"
},
{
"name": "Protocol Buffer",
"bytes": "30229"
},
{
"name": "Python",
"bytes": "1445233"
}
],
"symlink_target": ""
}
|
"""First API, local access only"""
import hug
@hug.local()
def happy_birthday(name: hug.types.text, age: hug.types.number, hug_timer=3):
    """Says happy birthday to a user"""
    greeting = 'Happy {0} Birthday {1}!'.format(age, name)
    return {'message': greeting,
            'took': float(hug_timer)}
|
{
"content_hash": "6ce5fe21af1267e157b03fc129d467c4",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 77,
"avg_line_length": 31.666666666666668,
"alnum_prop": 0.631578947368421,
"repo_name": "MuhammadAlkarouri/hug",
"id": "3cc8dbeaea72402f635b13ce5dc866e5bf687e24",
"size": "285",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "examples/quick_start/first_step_1.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "58"
},
{
"name": "Python",
"bytes": "370944"
},
{
"name": "Shell",
"bytes": "5929"
}
],
"symlink_target": ""
}
|
import unittest
from tageditor import musicTitle
class TestTagEditor(unittest.TestCase):
    """Unit tests for tageditor.musicTitle title-casing rules."""

    # FIX: assertEquals is a deprecated alias (removed in Python 3.12);
    # assertEqual is the supported spelling.

    def test_title_case(self):
        title = "I LiKe TyPiNg LiKe ThIs"
        self.assertEqual(musicTitle(title), "I Like Typing Like This")

    def test_title_with_spaces(self):
        title = "this contains  spaces "
        self.assertEqual(musicTitle(title), "This Contains Spaces")

    def test_title_with_quotes(self):
        title = "I 'am' isn't don't you're"
        self.assertEqual(musicTitle(title), "I 'am' Isn't Don't You're")

    def test_title_with_parans(self):
        title = "best title (ever)"
        self.assertEqual(musicTitle(title), "Best Title (Ever)")

    def test_title_with_lp(self):
        title = "marshall mathers lp"
        self.assertEqual(musicTitle(title), "Marshall Mathers LP")

    def test_title_with_ep(self):
        title = "my bands ep"
        self.assertEqual(musicTitle(title), "My Bands EP")

    def test_title_with_roman_num(self):
        self.assertEqual(musicTitle("version i"), "Version I")
        self.assertEqual(musicTitle("version ii"), "Version II")
        self.assertEqual(musicTitle("version iii"), "Version III")
        self.assertEqual(musicTitle("version iv"), "Version IV")
        self.assertEqual(musicTitle("version v"), "Version V")

    def test_title_with_special_case_parens(self):
        self.assertEqual(musicTitle("Version (iii)"), "Version (III)")
        self.assertEqual(musicTitle("(CAN'T)"), "(Can't)")
        self.assertEqual(musicTitle("test (ep)"), "Test (EP)")
        self.assertEqual(musicTitle("test (lp)"), "Test (LP)")
# Allow the suite to be run directly: `python test_tageditor.py`.
if __name__ == '__main__':
    unittest.main()
|
{
"content_hash": "db088114ada27089a8328eea2a246035",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 73,
"avg_line_length": 38.31818181818182,
"alnum_prop": 0.6405693950177936,
"repo_name": "mikekucharski/PyD3",
"id": "0877eb52e6306840d68564ea1598fe21173bfe66",
"size": "1686",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test_tageditor.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "12851"
}
],
"symlink_target": ""
}
|
import json
import pickle
from revscoring.scoring.statistics.classification.counts import \
MultilabelCounts
def test_counts():
    """Exercise MultilabelCounts aggregation, lookups and picklability."""
    observations = (
        [({'prediction': ["foo"]}, ["foo", "bar"])] * 10 +
        [({'prediction': ["foo", "bar", "baz"]}, ["foo", "baz"])] * 20 +
        [({'prediction': ["bar"]}, ["bar"])] * 30 +
        [({'prediction': ["baz"]}, ["bar", "baz"])] * 40
    )
    counts = MultilabelCounts(["foo", "bar", "baz"], observations, 'prediction')
    print(counts.format_str({}))
    print(json.dumps(counts.format_json({}), indent=2))

    assert counts.lookup("n") == 100
    assert counts.lookup("labels.foo") == 30
    assert counts.lookup("predictions.foo.true.false") == 0
    assert counts.lookup("predictions.foo.true.true") == 30
    assert counts.lookup("predictions.bar.false.true") == 20

    # Round-trip to make sure the statistics object is picklable.
    pickle.loads(pickle.dumps(counts))
|
{
"content_hash": "1c95acc4906f19d936c67c7ce3067f76",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 72,
"avg_line_length": 31.307692307692307,
"alnum_prop": 0.5577395577395577,
"repo_name": "wiki-ai/revscoring",
"id": "9ec4a563f58fdf835736b557fb2d4d6677b9bc56",
"size": "814",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/scoring/statistics/classification/tests/test_multilabel_counts.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "777"
},
{
"name": "Jupyter Notebook",
"bytes": "32675"
},
{
"name": "Python",
"bytes": "957061"
}
],
"symlink_target": ""
}
|
class BaseExtension:
    """Common base for bot extensions; stores the owning bot and its config."""

    def __init__(self, bot, config):
        # Keep references so subclasses can reach the bot API and settings.
        self.config = config
        self.bot = bot
|
{
"content_hash": "ab427ba2ba701855a9c098681733987e",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 36,
"avg_line_length": 27.5,
"alnum_prop": 0.5818181818181818,
"repo_name": "Slko/Slot",
"id": "899b2f2970b5057a887d9c5787b7ed83ceef1284",
"size": "110",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "slot/extensions/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "68413"
}
],
"symlink_target": ""
}
|
"""
$description Bulgarian CDN hosting live content for various websites in Bulgaria.
$url armymedia.bg
$url bgonair.bg
$url bloombergtv.bg
$url bnt.bg
$url live.bstv.bg
$url i.cdn.bg
$url nova.bg
$url mu-vi.tv
$type live
$region Bulgaria
"""
import logging
import re
from urllib.parse import urlparse
from streamlink.plugin import Plugin, pluginmatcher
from streamlink.plugin.api import validate
from streamlink.stream.hls import HLSStream
from streamlink.utils.url import update_scheme
log = logging.getLogger(__name__)
# Matches the supported Bulgarian sites that embed cdn.bg streams, plus
# direct i.cdn.bg/live/ URLs.
@pluginmatcher(re.compile(r"""
    https?://(?:www\.)?(?:
        armymedia\.bg
        |
        bgonair\.bg/tvonline
        |
        bloombergtv\.bg/video
        |
        (?:tv\.)?bnt\.bg/\w+(?:/\w+)?
        |
        live\.bstv\.bg
        |
        i\.cdn\.bg/live/
        |
        nova\.bg/live
        |
        mu-vi\.tv/LiveStreams/pages/Live\.aspx
    )/?
""", re.VERBOSE))
class CDNBG(Plugin):
    @staticmethod
    def _find_url(regex: re.Pattern) -> validate.all:
        # Schema helper: match `regex` against the response text and
        # return its named "url" capture group.
        return validate.all(
            validate.regex(regex),
            validate.get("url"),
        )

    def _get_streams(self):
        # Direct i.cdn.bg URLs already point at the iframe, but need an
        # explicit Referer because the CDN checks the embedding site.
        if "cdn.bg" in urlparse(self.url).netloc:
            iframe_url = self.url
            h = self.session.get_option("http-headers")
            if not h or not h.get("Referer"):
                log.error("Missing Referer for iframe URL, use --http-header \"Referer=URL\" ")
                return
            _referer = h.get("Referer")
        else:
            # Otherwise locate the embedded cdn.bg iframe in the page,
            # either via an inline JS assignment or an <iframe> tag.
            _referer = self.url
            iframe_url = self.session.http.get(self.url, schema=validate.Schema(
                validate.any(
                    self._find_url(
                        re.compile(r"'src',\s*'(?P<url>https?://i\.cdn\.bg/live/\w+)'\);")
                    ),
                    validate.all(
                        validate.parse_html(),
                        validate.xml_xpath_string(".//iframe[contains(@src,'cdn.bg')][1]/@src")
                    )
                )
            ))
            if not iframe_url:
                return

        iframe_url = update_scheme("https://", iframe_url, force=False)
        log.debug(f"Found iframe: {iframe_url}")

        # Several generations of the player markup are probed in order;
        # the final pattern catches the geoblock notice.
        stream_url = self.session.http.get(
            iframe_url,
            headers={"Referer": _referer},
            schema=validate.Schema(
                validate.any(
                    self._find_url(
                        re.compile(r"sdata\.src.*?=.*?(?P<q>[\"'])(?P<url>http.*?)(?P=q)")
                    ),
                    self._find_url(
                        re.compile(r"(src|file): (?P<q>[\"'])(?P<url>(https?:)?//.+?m3u8.*?)(?P=q)")
                    ),
                    self._find_url(
                        re.compile(r"video src=(?P<url>http[^ ]+m3u8[^ ]*)")
                    ),
                    self._find_url(
                        re.compile(r"source src=\"(?P<url>[^\"]+m3u8[^\"]*)\"")
                    ),
                    # GEOBLOCKED
                    self._find_url(
                        re.compile(r"(?P<url>[^\"]+geoblock[^\"]+)")
                    ),
                )
            )
        )
        if "geoblock" in stream_url:
            log.error("Geo-restricted content")
            return

        return HLSStream.parse_variant_playlist(
            self.session,
            update_scheme(iframe_url, stream_url),
            headers={"Referer": "https://i.cdn.bg/"},
        )


__plugin__ = CDNBG
|
{
"content_hash": "cb67ab844c747218151959d11ba4451e",
"timestamp": "",
"source": "github",
"line_count": 117,
"max_line_length": 100,
"avg_line_length": 29.752136752136753,
"alnum_prop": 0.47112898592358515,
"repo_name": "chhe/streamlink",
"id": "c080becebd1ec4bc626be50ad59f2c3e7dc74a12",
"size": "3481",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "src/streamlink/plugins/cdnbg.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "1513527"
},
{
"name": "Shell",
"bytes": "6427"
}
],
"symlink_target": ""
}
|
"""
PlexAPI Examples
As of Plex version 0.9.11 I noticed that you must be logged in
to browse even the plex server locatewd at localhost. You can
run this example suite with the following command:
>> python examples.py -u <USERNAME> -p <PASSWORD> -s <SERVERNAME>
"""
import argparse, sys
from os.path import dirname, abspath
sys.path.append(dirname(dirname(abspath(__file__))))
from utils import fetch_server, iter_tests
def example_001_list_all_unwatched_content(plex):
    """ Example 1: List all unwatched content in library """
    for library_section in plex.library.sections():
        print('Unwatched content in %s:' % library_section.title)
        for item in library_section.unwatched():
            print(' %s' % item.title)
def example_002_mark_all_conan_episodes_watched(plex):
    """ Example 2: Mark all Friends episodes watched. """
    show = plex.library.section('TV Shows').get('Friends')
    show.markWatched()
def example_003_list_all_clients(plex):
    """ Example 3: List all Clients connected to the Server. """
    clients = plex.clients()
    if not clients:
        # FIX: the original used for/else here -- the else branch runs
        # after every loop that finishes without `break`, so 'No clients'
        # was printed even when clients were listed. It also used a
        # Python-2-only print statement.
        print('No clients')
    for client in clients:
        print(client.name)
def example_004_play_avatar_on_iphone(plex):
    """ Example 4: Play the Movie Avatar on my iPhone.
    Note: Client must be on same network as server.
    """
    movie = plex.library.section('Movies').get('Avatar')
    target = plex.client("iphone-mike")
    target.playMedia(movie)
def example_005_search(plex):
    """ Example 5: List all content with the word 'Game' in the title. """
    for result in plex.search('Game'):
        print('%s (%s)' % (result.title, result.TYPE))
def example_006_follow_the_talent(plex):
    """ Example 6: List all movies directed by the same person as Jurassic Park. """
    movies = plex.library.section('Movies')
    # Use the first credited director of Jurassic Park as the filter.
    director = movies.get('Jurassic Park').directors[0]
    for match in movies.search(None, director=director):
        print(match.title)
def example_007_list_files(plex):
    """ Example 7: List files for the latest episode of Friends. """
    episodes = plex.library.section('TV Shows').get('Friends').episodes()
    latest = episodes[-1]
    for part in latest.iter_parts():
        print(part.file)
def example_008_get_stream_url(plex):
    """ Example 8: Get a URL you can open in VLC, MPV, etc. """
    jurassic_park = plex.library.section('Movies').get('Jurassic Park')
    # FIX: print() calls instead of Python-2-only print statements, for
    # consistency with the rest of this file (works on both 2 and 3 with
    # a single argument).
    print('Try running the following command:')
    print('vlc "%s"' % jurassic_park.getStreamUrl(videoResolution='800x600'))
if __name__ == '__main__':
    # Parse CLI credentials, connect to the Plex server/resource and run
    # every example whose name matches --name (all of them when omitted).
    parser = argparse.ArgumentParser(description='Run PlexAPI examples.')
    parser.add_argument('-r', '--resource', help='Name of the Plex resource (requires user/pass).')
    parser.add_argument('-u', '--username', help='Username for the Plex server.')
    parser.add_argument('-p', '--password', help='Password for the Plex server.')
    parser.add_argument('-n', '--name', help='Only run tests containing this string. Leave blank to run all examples.')
    args = parser.parse_args()
    plex, user = fetch_server(args)
    for example in iter_tests(__name__, args):
        example(plex)
|
{
"content_hash": "9d0e77cbdf5630faded7ba0a14fc498a",
"timestamp": "",
"source": "github",
"line_count": 84,
"max_line_length": 119,
"avg_line_length": 37.154761904761905,
"alnum_prop": 0.6719000320410125,
"repo_name": "jmjordan/python-plexapi",
"id": "1a2b10d41e83920b46fc4dd9283f9a084203e9e7",
"size": "3121",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "examples/examples.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "50591"
}
],
"symlink_target": ""
}
|
try:
import unittest2 as unittest
except ImportError:
import unittest
from sunlight.service import EntityList, EntityDict
class TestEntityList(unittest.TestCase):
    """Tests for the EntityList response wrapper."""
    def test_list_meta(self):
        """Metadata passed at construction must be kept on ``_meta``."""
        entities = EntityList(['foo', 'bar'], meta={'number': 20})
        self.assertIsNotNone(entities._meta)
class TestEntityDict(unittest.TestCase):
    """Tests for the EntityDict response wrapper."""
    def test_dict_meta(self):
        """Metadata passed at construction must be kept on ``_meta``."""
        entity_map = EntityDict({'foo': 'bar'}, meta={'number': 20})
        self.assertIsNotNone(entity_map._meta)
|
{
"content_hash": "41b4685eb9b710955c7b3a11b8ae154c",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 61,
"avg_line_length": 24.15,
"alnum_prop": 0.6749482401656315,
"repo_name": "sunlightlabs/python-sunlight",
"id": "ae1e0a38d0005e0a5c6fc3791c47ef1dbbaa0be2",
"size": "483",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/models/test_models.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "81908"
}
],
"symlink_target": ""
}
|
from copy import deepcopy
import functools
import hashlib
import http.client as httpstatus
import json
from typing import Dict
from typing import List
from typing import Tuple
from unittest import mock
import uuid
from cinder import context
from cinder import db
from cinder import exception
from cinder.tests.unit import test
from cinder.tests.unit import utils as test_utils
from cinder.volume import configuration as conf
from cinder.volume.drivers import lightos
# Canned responses served by the mocked LightOS cluster API ("send_cmd").
FAKE_LIGHTOS_CLUSTER_NODES: Dict[str, List] = {
    "nodes": [
        {"UUID": "926e6df8-73e1-11ec-a624-000000000001",
         "nvmeEndpoint": "192.168.75.10:4420"},
        {"UUID": "926e6df8-73e1-11ec-a624-000000000002",
         "nvmeEndpoint": "192.168.75.11:4420"},
        {"UUID": "926e6df8-73e1-11ec-a624-000000000003",
         "nvmeEndpoint": "192.168.75.12:4420"}
    ]
}
# IPv6 variants of the same three nodes; endpoints are bracket-wrapped.
IPV6_LIST = ['::192:168:75:10', '::192:168:75:11', '::192:168:75:12']
FAKE_LIGHTOS_CLUSTER_NODES_IPV6: Dict[str, List] = {
    "nodes": [
        {"UUID": "926e6df8-73e1-11ec-a624-000000000001",
         "nvmeEndpoint": "[{}]:4420".format(IPV6_LIST[0])},
        {"UUID": "926e6df8-73e1-11ec-a624-000000000002",
         "nvmeEndpoint": "[{}]:4420".format(IPV6_LIST[1])},
        {"UUID": "926e6df8-73e1-11ec-a624-000000000003",
         "nvmeEndpoint": "[{}]:4420".format(IPV6_LIST[2])}
    ]
}
FAKE_LIGHTOS_CLUSTER_INFO: Dict[str, str] = {
    'UUID': "926e6df8-73e1-11ec-a624-07ba3880f6cc",
    'subsystemNQN': "nqn.2014-08.org.nvmexpress:NVMf:uuid:"
                    "f4a89ce0-9fc2-4900-bfa3-00ad27995e7b"
}
FAKE_CLIENT_HOSTNQN = "hostnqn1"
# Driver/backend configuration defaults shared by the tests below.
# Fix: VOLUME_BACKEND_NAME and RESERVED_PERCENTAGE were previously defined
# twice with identical values; the redundant second definitions are removed.
VOLUME_BACKEND_NAME = "lightos_backend"
RESERVED_PERCENTAGE = 30
DEVICE_SCAN_ATTEMPTS_DEFAULT = 5
LIGHTOS_API_SERVICE_TIMEOUT = 30
DEFAULT_COMPRESSION = False
class InitiatorConnectorFactoryMocker:
    """Stand-in for os-brick's connector factory used by the driver config.

    Every argument is accepted and ignored; a fresh mock connector is
    returned instead of a real NVMe initiator connector.
    """
    @staticmethod
    def factory(protocol, root_helper, driver=None,
                use_multipath=False,
                device_scan_attempts=DEVICE_SCAN_ATTEMPTS_DEFAULT,
                arch=None,
                *args, **kwargs):
        """Return a mock connector regardless of the requested protocol."""
        connector = InitialConnectorMock()
        return connector
class InitialConnectorMock:
    """Minimal NVMe-connector double; tests tweak the class attributes
    below to simulate hosts with/without an NQN or a discovery client."""
    nqn = FAKE_CLIENT_HOSTNQN
    found_discovery_client = True
    def get_hostnqn(self):
        """Return the host NQN currently configured on the class."""
        return type(self).nqn
    def find_dsc(self):
        """Report whether a discovery service client was 'found'."""
        return type(self).found_discovery_client
    def get_connector_properties(self, root):
        """Return the connector-properties mapping the driver expects."""
        cls = type(self)
        return {'nqn': cls.nqn, 'found_dsc': cls.found_discovery_client}
def get_connector_properties():
    """Build the connector-properties dict from a fresh mock connector."""
    mock_connector = InitialConnectorMock()
    return {'nqn': mock_connector.get_hostnqn(),
            'found_dsc': mock_connector.find_dsc()}
def get_vol_etag(volume):
    """Compute a volume's ETag: md5 over its canonical (sorted) JSON form.

    Any pre-existing "ETag" key is excluded so the digest covers only the
    payload itself; the caller's dict is left unmodified.
    """
    payload = deepcopy(volume)
    payload.pop("ETag", None)
    canonical = json.dumps(payload, sort_keys=True).encode('utf-8')
    return hashlib.md5(canonical).hexdigest()
class DBMock(object):
    """In-memory stand-in for the LightOS cluster REST backend.

    State is a dict of projects; each project optionally holds "volumes"
    and "snapshots" lists.  Methods mirror the real client's return shape
    of ``(HTTP status code, payload-or-None)``.

    Fixes versus the previous version:
      * ``delete_project`` called ``dict.remove()``, which does not exist
        (AttributeError); it now deletes by key.
      * ``delete_volume`` / ``delete_snapshot`` implicitly returned ``None``
        when the UUID was not present; they now return ``(NOT_FOUND, None)``
        consistently with their declared return type.
    """
    def __init__(self):
        self.data = {
            "projects": {},
        }
    def get_or_create_project(self, project_name) -> Dict:
        """Return the named project, creating an empty one if needed."""
        project = self.data["projects"].setdefault(project_name, {})
        return project
    def get_project(self, project_name) -> Dict:
        """Return the named project, or None if missing (or still empty)."""
        project = self.data["projects"].get(project_name, None)
        return project if project else None
    def delete_project(self, project_name) -> Dict:
        """Remove and return the named project; None if it does not exist."""
        assert project_name != "default", "can't delete default project"
        project = self.get_project(project_name)
        if not project:
            return None
        # Fix: was self.data["projects"].remove(project) -- dicts have no
        # .remove(); delete the entry by key instead.
        del self.data["projects"][project_name]
        return project
    def create_volume(self, volume) -> Tuple[int, Dict]:
        """Add a volume to its project; CONFLICT if the name already exists."""
        assert volume["project_name"] and volume["name"], "must be provided"
        project = self.get_or_create_project(volume["project_name"])
        volumes = project.setdefault("volumes", [])
        existing_volume = next(iter([vol for vol in volumes
                                     if vol["name"] == volume["name"]]), None)
        if not existing_volume:
            # New volumes are assigned a server-side UUID, like the real API.
            volume["UUID"] = str(uuid.uuid4())
            volumes.append(volume)
            return httpstatus.OK, volume
        return httpstatus.CONFLICT, None
    def get_volume_by_uuid(self, project_name,
                           volume_uuid) -> Tuple[int, Dict]:
        """Look up a volume by UUID within a project."""
        assert project_name and volume_uuid, "must be provided"
        project = self.get_project(project_name)
        if not project:
            return httpstatus.NOT_FOUND, None
        proj_vols = project.get("volumes", None)
        if not proj_vols:
            return httpstatus.NOT_FOUND, None
        vol = next(iter([vol for vol in proj_vols
                         if vol["UUID"] == volume_uuid]), None)
        return (httpstatus.OK, vol) if vol else (httpstatus.NOT_FOUND, None)
    def update_volume_by_uuid(self, project_name,
                              volume_uuid, **kwargs) -> Tuple[int, Dict]:
        """Update a volume's size and/or ACL, honoring an optional ETag.

        A supplied ``etag`` kwarg must match the stored ETag, otherwise the
        update is rejected with BAD_REQUEST (optimistic concurrency).
        """
        error_code, volume = self.get_volume_by_uuid(project_name, volume_uuid)
        if error_code != httpstatus.OK:
            return error_code, None
        etag = kwargs.get("etag", None)
        if etag:
            vol_etag = volume.get("ETag", None)
            if etag != vol_etag:
                return httpstatus.BAD_REQUEST, None
        if kwargs.get("size", None):
            volume["size"] = kwargs["size"]
        if kwargs.get("acl", None):
            volume["acl"] = {'values': kwargs.get('acl')}
        # Recompute the ETag so subsequent conditional updates see the change.
        volume["ETag"] = get_vol_etag(volume)
        return httpstatus.OK, volume
    def get_volume_by_name(self, project_name,
                           volume_name) -> Tuple[int, Dict]:
        """Look up a volume by name within a project."""
        assert project_name and volume_name, "must be provided"
        project = self.get_project(project_name)
        if not project:
            return httpstatus.NOT_FOUND, None
        proj_vols = project.get("volumes", None)
        if not proj_vols:
            return httpstatus.NOT_FOUND, None
        vol = next(iter([vol for vol in proj_vols
                         if vol["name"] == volume_name]), None)
        return (httpstatus.OK, vol) if vol else (httpstatus.NOT_FOUND, None)
    def delete_volume(self, project_name, volume_uuid) -> Tuple[int, Dict]:
        """Remove a volume by UUID; NOT_FOUND when project/volume is absent."""
        assert project_name and volume_uuid, "must be provided"
        project = self.get_project(project_name)
        if not project:
            return httpstatus.NOT_FOUND, None
        proj_vols = project.get("volumes", None)
        if not proj_vols:
            return httpstatus.NOT_FOUND, None
        for vol in proj_vols:
            if vol["UUID"] == volume_uuid:
                proj_vols.remove(vol)
                return httpstatus.OK, vol
        # Fix: previously fell off the end and returned None, which callers
        # unpacking (status, payload) would trip over.
        return httpstatus.NOT_FOUND, None
    def update_volume(self, **kwargs):
        """Stub kept for interface parity; validates the project name only."""
        assert("project_name" in kwargs and kwargs["project_name"]), \
            "project_name must be provided"
    def create_snapshot(self, snapshot) -> Tuple[int, Dict]:
        """Add a snapshot to its project; CONFLICT if the name exists."""
        assert snapshot["project_name"] and snapshot["name"], \
            "must be provided"
        project = self.get_or_create_project(snapshot["project_name"])
        snapshots = project.setdefault("snapshots", [])
        existing_snap = next(iter([snap for snap in snapshots
                                   if snap["name"] == snapshot["name"]]), None)
        if not existing_snap:
            snapshot["UUID"] = str(uuid.uuid4())
            snapshots.append(snapshot)
            return httpstatus.OK, snapshot
        return httpstatus.CONFLICT, None
    def delete_snapshot(self, project_name, snapshot_uuid) -> Tuple[int, Dict]:
        """Remove a snapshot by UUID; NOT_FOUND when absent."""
        assert project_name and snapshot_uuid, "must be provided"
        project = self.get_project(project_name)
        if not project:
            return httpstatus.NOT_FOUND, None
        proj_snaps = project.get("snapshots", None)
        if not proj_snaps:
            return httpstatus.NOT_FOUND, None
        for snap in proj_snaps:
            if snap["UUID"] == snapshot_uuid:
                proj_snaps.remove(snap)
                return httpstatus.OK, snap
        # Fix: same missing-UUID fall-through as delete_volume.
        return httpstatus.NOT_FOUND, None
    def get_snapshot_by_name(self, project_name,
                             snapshot_name) -> Tuple[int, Dict]:
        """Look up a snapshot by name within a project."""
        assert project_name and snapshot_name, "must be provided"
        project = self.get_project(project_name)
        if not project:
            return httpstatus.NOT_FOUND, None
        proj_snaps = project.get("snapshots", None)
        if not proj_snaps:
            return httpstatus.NOT_FOUND, None
        snap = next(iter([snap for snap in proj_snaps
                          if snap["name"] == snapshot_name]), None)
        return (httpstatus.OK, snap) if snap else (httpstatus.NOT_FOUND, None)
    def get_snapshot_by_uuid(self, project_name,
                             snapshot_uuid) -> Tuple[int, Dict]:
        """Look up a snapshot by UUID within a project."""
        assert project_name and snapshot_uuid, "must be provided"
        project = self.get_project(project_name)
        if not project:
            return httpstatus.NOT_FOUND, None
        proj_snaps = project.get("snapshots", None)
        if not proj_snaps:
            return httpstatus.NOT_FOUND, None
        snap = next(iter([snap for snap in proj_snaps
                          if snap["UUID"] == snapshot_uuid]), None)
        return (httpstatus.OK, snap) if snap else (httpstatus.NOT_FOUND, None)
class LightOSStorageVolumeDriverTest(test.TestCase):
    """Unit tests for the LightOS Cinder volume driver.

    The driver's REST transport (``cluster.send_cmd``) is replaced in
    ``setUp`` with closures over an in-memory :class:`DBMock`, so every
    test runs without a real LightOS cluster.  Individual tests install
    further ``send_cmd`` overrides when they need special responses
    (failed/migrating volume states, IPv6 node listings, auth failures).
    """
    def setUp(self):
        """Initialize LightOS Storage Driver."""
        super(LightOSStorageVolumeDriverTest, self).setUp()
        configuration = mock.Mock(conf.Configuration)
        configuration.lightos_api_address = \
            "10.10.10.71,10.10.10.72,10.10.10.73"
        configuration.lightos_api_port = 443
        configuration.lightos_jwt = None
        configuration.lightos_snapshotname_prefix = 'openstack_'
        configuration.lightos_intermediate_snapshot_name_prefix = 'for_clone_'
        configuration.lightos_default_compression_enabled = (
            DEFAULT_COMPRESSION)
        configuration.lightos_default_num_replicas = 3
        configuration.num_volume_device_scan_tries = (
            DEVICE_SCAN_ATTEMPTS_DEFAULT)
        configuration.lightos_api_service_timeout = LIGHTOS_API_SERVICE_TIMEOUT
        configuration.driver_ssl_cert_verify = False
        # for some reason this value is not initialized by the driver parent
        # configs
        configuration.volume_name_template = 'volume-%s'
        configuration.initiator_connector = (
            "cinder.tests.unit.volume.drivers.lightos."
            "test_lightos_storage.InitiatorConnectorFactoryMocker")
        configuration.volume_backend_name = VOLUME_BACKEND_NAME
        configuration.reserved_percentage = RESERVED_PERCENTAGE
        def mocked_safe_get(config, variable_name):
            # Mimic oslo.config's safe_get(): return the attribute if it
            # exists, otherwise None instead of raising.
            if hasattr(config, variable_name):
                return config.__getattribute__(variable_name)
            else:
                return None
        configuration.safe_get = functools.partial(mocked_safe_get,
                                                   configuration)
        self.driver = lightos.LightOSVolumeDriver(configuration=configuration)
        self.ctxt = context.get_admin_context()
        self.db: DBMock = DBMock()
        # define a default send_cmd override to return default values.
        def send_cmd_default_mock(cmd, timeout, **kwargs):
            """Dispatch a fake cluster API call against the DBMock state."""
            if cmd == "get_nodes":
                return (httpstatus.OK, FAKE_LIGHTOS_CLUSTER_NODES)
            if cmd == "get_node":
                self.assertTrue(kwargs["UUID"])
                for node in FAKE_LIGHTOS_CLUSTER_NODES["nodes"]:
                    if kwargs["UUID"] == node["UUID"]:
                        return (httpstatus.OK, node)
                return (httpstatus.NOT_FOUND, node)
            elif cmd == "get_cluster_info":
                return (httpstatus.OK, FAKE_LIGHTOS_CLUSTER_INFO)
            elif cmd == "create_volume":
                project_name = kwargs["project_name"]
                volume = {
                    "project_name": project_name,
                    "name": kwargs["name"],
                    "size": kwargs["size"],
                    "n_replicas": kwargs["n_replicas"],
                    "compression": kwargs["compression"],
                    "src_snapshot_name": kwargs["src_snapshot_name"],
                    "acl": {'values': kwargs.get('acl')},
                    "state": "Available",
                }
                volume["ETag"] = get_vol_etag(volume)
                code, new_vol = self.db.create_volume(volume)
                return (code, new_vol)
            elif cmd == "delete_volume":
                return self.db.delete_volume(kwargs["project_name"],
                                             kwargs["volume_uuid"])
            elif cmd == "get_volume":
                return self.db.get_volume_by_uuid(kwargs["project_name"],
                                                  kwargs["volume_uuid"])
            elif cmd == "get_volume_by_name":
                return self.db.get_volume_by_name(kwargs["project_name"],
                                                  kwargs["volume_name"])
            elif cmd == "extend_volume":
                size = kwargs.get("size", None)
                return self.db.update_volume_by_uuid(kwargs["project_name"],
                                                     kwargs["volume_uuid"],
                                                     size=size)
            elif cmd == "create_snapshot":
                snapshot = {
                    "project_name": kwargs.get("project_name", None),
                    "name": kwargs.get("name", None),
                    "state": "Available",
                }
                return self.db.create_snapshot(snapshot)
            elif cmd == "delete_snapshot":
                return self.db.delete_snapshot(kwargs["project_name"],
                                               kwargs["snapshot_uuid"])
            elif cmd == "get_snapshot":
                return self.db.get_snapshot_by_uuid(kwargs["project_name"],
                                                    kwargs["snapshot_uuid"])
            elif cmd == "get_snapshot_by_name":
                return self.db.get_snapshot_by_name(kwargs["project_name"],
                                                    kwargs["snapshot_name"])
            elif cmd == "update_volume":
                return self.db.update_volume_by_uuid(**kwargs)
            else:
                raise RuntimeError(
                    f"'{cmd}' is not implemented. kwargs: {kwargs}")
        self.driver.cluster.send_cmd = send_cmd_default_mock
    def test_setup_should_fail_if_lightos_client_cant_auth_cluster(self):
        """Verify lightos_client fail with bad auth."""
        def side_effect(cmd, timeout):
            if cmd == "get_cluster_info":
                return (httpstatus.UNAUTHORIZED, None)
            else:
                raise RuntimeError()
        self.driver.cluster.send_cmd = side_effect
        self.assertRaises(exception.InvalidAuthKey,
                          self.driver.do_setup, None)
    def test_setup_should_succeed(self):
        """do_setup succeeds against the default mocked cluster."""
        self.driver.do_setup(None)
    def test_create_volume_should_succeed(self):
        """Creating then deleting a volume works end to end."""
        self.driver.do_setup(None)
        vol_type = test_utils.create_volume_type(self.ctxt, self,
                                                 name='my_vol_type')
        volume = test_utils.create_volume(self.ctxt, size=4,
                                          volume_type_id=vol_type.id)
        self.driver.create_volume(volume)
        self.driver.delete_volume(volume)
        db.volume_destroy(self.ctxt, volume.id)
    def test_create_volume_same_volume_twice_succeed(self):
        """Test succeed to create an exiting volume."""
        self.driver.do_setup(None)
        vol_type = test_utils.create_volume_type(self.ctxt, self,
                                                 name='my_vol_type')
        volume = test_utils.create_volume(self.ctxt, size=4,
                                          volume_type_id=vol_type.id)
        self.driver.create_volume(volume)
        self.driver.create_volume(volume)
        self.driver.delete_volume(volume)
        db.volume_destroy(self.ctxt, volume.id)
    def _create_volume_in_failed_state(self, vol_state):
        """Verify scenario of created volume in failed state:
        Driver is expected to issue a deletion command and raise exception
        """
        def send_cmd_mock(cmd, **kwargs):
            # Only override create_volume to report the requested bad state;
            # everything else falls through to the default mock.
            if cmd == "create_volume":
                project_name = kwargs["project_name"]
                volume = {
                    "project_name": project_name,
                    "name": kwargs["name"],
                    "size": kwargs["size"],
                    "n_replicas": kwargs["n_replicas"],
                    "compression": kwargs["compression"],
                    "src_snapshot_name": kwargs["src_snapshot_name"],
                    "acl": {'values': kwargs.get('acl')},
                    "state": vol_state,
                }
                volume["ETag"] = get_vol_etag(volume)
                code, new_vol = self.db.create_volume(volume)
                return (code, new_vol)
            else:
                return cluster_send_cmd(cmd, **kwargs)
        self.driver.do_setup(None)
        cluster_send_cmd = deepcopy(self.driver.cluster.send_cmd)
        self.driver.cluster.send_cmd = send_cmd_mock
        vol_type = test_utils.create_volume_type(self.ctxt, self,
                                                 name='my_vol_type')
        volume = test_utils.create_volume(self.ctxt, size=4,
                                          volume_type_id=vol_type.id)
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.driver.create_volume, volume)
        # The failed volume must have been cleaned up on the backend.
        proj = self.db.data["projects"][lightos.LIGHTOS_DEFAULT_PROJECT_NAME]
        actual_volumes = proj["volumes"]
        self.assertEqual(0, len(actual_volumes))
        db.volume_destroy(self.ctxt, volume.id)
    def test_create_volume_in_failed_state(self):
        """A backend volume reported as Failed must raise and be removed."""
        self._create_volume_in_failed_state("Failed")
    def test_create_volume_in_rollback_state(self):
        """A backend volume reported as Rollback must raise and be removed."""
        self._create_volume_in_failed_state("Rollback")
    def test_create_volume_in_migrating_state_succeed(self):
        """Verify scenario of created volume in migrating state:
        Driver is expected to succeed.
        """
        def send_cmd_mock(cmd, **kwargs):
            if cmd == "create_volume":
                project_name = kwargs["project_name"]
                volume = {
                    "project_name": project_name,
                    "name": kwargs["name"],
                    "size": kwargs["size"],
                    "n_replicas": kwargs["n_replicas"],
                    "compression": kwargs["compression"],
                    "src_snapshot_name": kwargs["src_snapshot_name"],
                    "acl": {'values': kwargs.get('acl')},
                    "state": "Migrating",
                }
                volume["ETag"] = get_vol_etag(volume)
                code, new_vol = self.db.create_volume(volume)
                return (code, new_vol)
            else:
                return cluster_send_cmd(cmd, **kwargs)
        self.driver.do_setup(None)
        cluster_send_cmd = deepcopy(self.driver.cluster.send_cmd)
        self.driver.cluster.send_cmd = send_cmd_mock
        vol_type = test_utils.create_volume_type(self.ctxt, self,
                                                 name='my_vol_type')
        volume = test_utils.create_volume(self.ctxt, size=4,
                                          volume_type_id=vol_type.id)
        self.driver.create_volume(volume)
        # Unlike Failed/Rollback, a Migrating volume is kept on the backend.
        proj = self.db.data["projects"][lightos.LIGHTOS_DEFAULT_PROJECT_NAME]
        actual_volumes = proj["volumes"]
        self.assertEqual(1, len(actual_volumes))
        db.volume_destroy(self.ctxt, volume.id)
    def test_delete_volume_fail_if_not_created(self):
        """Deleting a volume that was never created must not raise."""
        # NOTE(review): the method name says "fail" but the asserted
        # behavior is that delete_volume tolerates a missing backend volume.
        self.driver.do_setup(None)
        vol_type = test_utils.create_volume_type(self.ctxt, self,
                                                 name='my_vol_type')
        volume = test_utils.create_volume(self.ctxt, size=4,
                                          volume_type_id=vol_type.id)
        self.driver.delete_volume(volume)
        db.volume_destroy(self.ctxt, volume.id)
    def test_extend_volume_should_succeed(self):
        """Extending an existing volume succeeds."""
        self.driver.do_setup(None)
        vol_type = test_utils.create_volume_type(self.ctxt, self,
                                                 name='my_vol_type')
        volume = test_utils.create_volume(self.ctxt, size=4,
                                          volume_type_id=vol_type.id)
        self.driver.create_volume(volume)
        self.driver.extend_volume(volume, 6)
        self.driver.delete_volume(volume)
        db.volume_destroy(self.ctxt, volume.id)
    def test_get_volume_specs_compression_True(self):
        """Both 'True' and '<is> True' extra-specs enable compression;
        with the default disabled, an unspecified type yields 'False'."""
        self.driver.do_setup(None)
        vol_type = test_utils.create_volume_type(
            self.ctxt, self,
            extra_specs={'compression': 'True'},
            name='my_vol_typ1')
        vol_type2 = test_utils.create_volume_type(
            self.ctxt, self,
            extra_specs={'compression': '<is> True'},
            name='my_vol_type2')
        vol_type3 = test_utils.create_volume_type(
            self.ctxt, self,
            name='my_vol_type3')
        volume1 = test_utils.create_volume(self.ctxt, size=4,
                                           volume_type_id=vol_type.id)
        volume2 = test_utils.create_volume(self.ctxt, size=4,
                                           volume_type_id=vol_type2.id)
        volume3 = test_utils.create_volume(self.ctxt, size=4,
                                           volume_type_id=vol_type3.id)
        compression, _, _ = self.driver._get_volume_specs(volume1)
        self.assertTrue(compression == "True")
        compression, _, _ = self.driver._get_volume_specs(volume2)
        self.assertTrue(compression == "True")
        compression, _, _ = self.driver._get_volume_specs(volume3)
        self.assertTrue(compression == "False")
        db.volume_destroy(self.ctxt, volume1.id)
        db.volume_destroy(self.ctxt, volume2.id)
        db.volume_destroy(self.ctxt, volume3.id)
    def test_get_volume_specs_compression_False(self):
        """Explicit 'False' extra-specs win over an enabled default;
        an unspecified type then inherits the enabled default ('True')."""
        self.driver.do_setup(None)
        self.driver.configuration.lightos_default_compression_enabled = True
        vol_type = test_utils.create_volume_type(
            self.ctxt, self,
            extra_specs={'compression': 'False'},
            name='my_vol_typ1')
        vol_type2 = test_utils.create_volume_type(
            self.ctxt, self,
            extra_specs={'compression': '<is> False'},
            name='my_vol_type2')
        vol_type3 = test_utils.create_volume_type(
            self.ctxt, self,
            name='my_vol_type3')
        volume1 = test_utils.create_volume(self.ctxt, size=4,
                                           volume_type_id=vol_type.id)
        volume2 = test_utils.create_volume(self.ctxt, size=4,
                                           volume_type_id=vol_type2.id)
        volume3 = test_utils.create_volume(self.ctxt, size=4,
                                           volume_type_id=vol_type3.id)
        compression, _, _ = self.driver._get_volume_specs(volume1)
        self.assertTrue(compression == "False")
        compression, _, _ = self.driver._get_volume_specs(volume2)
        self.assertTrue(compression == "False")
        compression, _, _ = self.driver._get_volume_specs(volume3)
        self.assertTrue(compression == "True")
        db.volume_destroy(self.ctxt, volume1.id)
        db.volume_destroy(self.ctxt, volume2.id)
        db.volume_destroy(self.ctxt, volume3.id)
    def test_extend_volume_should_fail_if_volume_does_not_exist(self):
        """Extending a volume absent from the backend raises VolumeNotFound."""
        self.driver.do_setup(None)
        vol_type = test_utils.create_volume_type(self.ctxt, self,
                                                 name='my_vol_type')
        volume = test_utils.create_volume(self.ctxt, size=4,
                                          volume_type_id=vol_type.id)
        self.assertRaises(exception.VolumeNotFound,
                          self.driver.extend_volume, volume, 6)
        db.volume_destroy(self.ctxt, volume.id)
    def test_create_snapshot(self):
        """Creating a snapshot of an existing volume succeeds."""
        self.driver.do_setup(None)
        vol_type = test_utils.create_volume_type(self.ctxt, self,
                                                 name='my_vol_type')
        volume = test_utils.create_volume(self.ctxt, size=4,
                                          volume_type_id=vol_type.id)
        snapshot = test_utils.create_snapshot(self.ctxt, volume_id=volume.id)
        self.driver.create_volume(volume)
        self.driver.create_snapshot(snapshot)
        self.driver.delete_volume(volume)
        db.volume_destroy(self.ctxt, volume.id)
    def test_delete_snapshot(self):
        """Creating then deleting a snapshot succeeds."""
        self.driver.do_setup(None)
        vol_type = test_utils.create_volume_type(self.ctxt, self,
                                                 name='my_vol_type')
        volume = test_utils.create_volume(self.ctxt, size=4,
                                          volume_type_id=vol_type.id)
        snapshot = test_utils.create_snapshot(self.ctxt, volume_id=volume.id)
        self.driver.create_volume(volume)
        self.driver.create_snapshot(snapshot)
        self.driver.delete_snapshot(snapshot)
        self.driver.delete_volume(volume)
        db.volume_destroy(self.ctxt, volume.id)
    def test_create_volume_from_snapshot(self):
        """A volume can be created from a snapshot and lands on the backend."""
        self.driver.do_setup(None)
        vol_type = test_utils.create_volume_type(self.ctxt, self,
                                                 name='my_vol_type')
        volume = test_utils.create_volume(self.ctxt, size=4,
                                          volume_type_id=vol_type.id)
        snapshot = test_utils.create_snapshot(self.ctxt, volume_id=volume.id)
        self.driver.create_volume_from_snapshot(volume, snapshot)
        proj = self.db.data["projects"][lightos.LIGHTOS_DEFAULT_PROJECT_NAME]
        actual_volumes = proj["volumes"]
        self.assertEqual(1, len(actual_volumes))
        self.driver.delete_snapshot(snapshot)
        self.driver.delete_volume(volume)
        db.volume_destroy(self.ctxt, volume.id)
        db.snapshot_destroy(self.ctxt, snapshot.id)
    def test_initialize_connection(self):
        """initialize_connection returns the lightos connection properties."""
        InitialConnectorMock.nqn = "hostnqn1"
        InitialConnectorMock.found_discovery_client = True
        self.driver.do_setup(None)
        vol_type = test_utils.create_volume_type(self.ctxt, self,
                                                 name='my_vol_type')
        volume = test_utils.create_volume(self.ctxt, size=4,
                                          volume_type_id=vol_type.id)
        self.driver.create_volume(volume)
        connection_props = \
            self.driver.initialize_connection(volume,
                                              get_connector_properties())
        self.assertIn('driver_volume_type', connection_props)
        self.assertEqual('lightos', connection_props['driver_volume_type'])
        self.assertEqual(FAKE_LIGHTOS_CLUSTER_INFO['subsystemNQN'],
                         connection_props['data']['subsysnqn'])
        self.assertEqual(
            self.db.data['projects']['default']['volumes'][0]['UUID'],
            connection_props['data']['uuid'])
        self.driver.delete_volume(volume)
        db.volume_destroy(self.ctxt, volume.id)
    def test_initialize_connection_mirgrating_volume(self):
        """Connection props are still returned for a Migrating volume."""
        # NOTE(review): "mirgrating" typo in the method name; kept as-is to
        # avoid churning test identifiers.
        InitialConnectorMock.nqn = "hostnqn1"
        InitialConnectorMock.found_discovery_client = True
        def send_cmd_mock(cmd, **kwargs):
            if cmd == "create_volume":
                project_name = kwargs["project_name"]
                volume = {
                    "project_name": project_name,
                    "name": kwargs["name"],
                    "size": kwargs["size"],
                    "n_replicas": kwargs["n_replicas"],
                    "compression": kwargs["compression"],
                    "src_snapshot_name": kwargs["src_snapshot_name"],
                    "acl": {'values': kwargs.get('acl')},
                    "state": "Migrating",
                }
                volume["ETag"] = get_vol_etag(volume)
                code, new_vol = self.db.create_volume(volume)
                return (code, new_vol)
            else:
                return cluster_send_cmd(cmd, **kwargs)
        self.driver.do_setup(None)
        cluster_send_cmd = deepcopy(self.driver.cluster.send_cmd)
        self.driver.cluster.send_cmd = send_cmd_mock
        vol_type = test_utils.create_volume_type(self.ctxt, self,
                                                 name='my_vol_type')
        volume = test_utils.create_volume(self.ctxt, size=4,
                                          volume_type_id=vol_type.id)
        self.driver.create_volume(volume)
        connection_props = (
            self.driver.initialize_connection(volume,
                                              get_connector_properties()))
        self.assertIn('driver_volume_type', connection_props)
        self.assertEqual('lightos', connection_props['driver_volume_type'])
        self.assertEqual(FAKE_LIGHTOS_CLUSTER_INFO['subsystemNQN'],
                         connection_props['data']['subsysnqn'])
        self.assertEqual(
            self.db.data['projects']['default']['volumes'][0]['UUID'],
            connection_props['data']['uuid'])
        self.driver.delete_volume(volume)
        db.volume_destroy(self.ctxt, volume.id)
    def test_initialize_connection_ipv6(self):
        """IPv6 node endpoints are passed through to the connection props."""
        def side_effect(cmd, timeout, **kwargs):
            if cmd == "get_nodes":
                return (httpstatus.OK, FAKE_LIGHTOS_CLUSTER_NODES_IPV6)
            else:
                return cluster_send_cmd(cmd, timeout, **kwargs)
        cluster_send_cmd = deepcopy(self.driver.cluster.send_cmd)
        self.driver.cluster.send_cmd = side_effect
        InitialConnectorMock.nqn = "hostnqn1"
        InitialConnectorMock.found_discovery_client = True
        self.driver.do_setup(None)
        vol_type = test_utils.create_volume_type(self.ctxt, self,
                                                 name='my_vol_type')
        volume = test_utils.create_volume(self.ctxt, size=4,
                                          volume_type_id=vol_type.id)
        self.driver.create_volume(volume)
        connection_props = (
            self.driver.initialize_connection(volume,
                                              get_connector_properties()))
        self.assertIn('driver_volume_type', connection_props)
        self.assertEqual('lightos', connection_props['driver_volume_type'])
        self.assertEqual(FAKE_LIGHTOS_CLUSTER_INFO['subsystemNQN'],
                         connection_props['data']['subsysnqn'])
        self.assertEqual(
            self.db.data['projects']['default']['volumes'][0]['UUID'],
            connection_props['data']['uuid'])
        for connection in connection_props['data']['lightos_nodes']:
            self.assertIn(connection, IPV6_LIST)
        self.driver.delete_volume(volume)
        db.volume_destroy(self.ctxt, volume.id)
    def test_initialize_connection_no_hostnqn_should_fail(self):
        """A host without an NQN cannot attach a volume."""
        InitialConnectorMock.nqn = ""
        InitialConnectorMock.found_discovery_client = True
        self.driver.do_setup(None)
        vol_type = test_utils.create_volume_type(self.ctxt, self,
                                                 name='my_vol_type')
        volume = test_utils.create_volume(self.ctxt, size=4,
                                          volume_type_id=vol_type.id)
        self.driver.create_volume(volume)
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.driver.initialize_connection, volume,
                          get_connector_properties())
        self.driver.delete_volume(volume)
        db.volume_destroy(self.ctxt, volume.id)
    def test_initialize_connection_no_dsc_should_fail(self):
        """A host without a discovery client cannot attach a volume."""
        InitialConnectorMock.nqn = "hostnqn1"
        InitialConnectorMock.found_discovery_client = False
        self.driver.do_setup(None)
        vol_type = test_utils.create_volume_type(self.ctxt, self,
                                                 name='my_vol_type')
        volume = test_utils.create_volume(self.ctxt, size=4,
                                          volume_type_id=vol_type.id)
        self.driver.create_volume(volume)
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.driver.initialize_connection, volume,
                          get_connector_properties())
        self.driver.delete_volume(volume)
        db.volume_destroy(self.ctxt, volume.id)
    def test_terminate_connection_with_hostnqn(self):
        """Detaching with a valid host NQN succeeds."""
        InitialConnectorMock.nqn = "hostnqn1"
        InitialConnectorMock.found_discovery_client = True
        self.driver.do_setup(None)
        vol_type = test_utils.create_volume_type(self.ctxt, self,
                                                 name='my_vol_type')
        volume = test_utils.create_volume(self.ctxt, size=4,
                                          volume_type_id=vol_type.id)
        self.driver.create_volume(volume)
        self.driver.terminate_connection(volume, get_connector_properties())
        self.driver.delete_volume(volume)
        db.volume_destroy(self.ctxt, volume.id)
    def test_terminate_connection_with_empty_hostnqn_should_fail(self):
        """Detaching without a host NQN raises unless forced."""
        InitialConnectorMock.nqn = ""
        InitialConnectorMock.found_discovery_client = True
        self.driver.do_setup(None)
        vol_type = test_utils.create_volume_type(self.ctxt, self,
                                                 name='my_vol_type')
        volume = test_utils.create_volume(self.ctxt, size=4,
                                          volume_type_id=vol_type.id)
        self.driver.create_volume(volume)
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.driver.terminate_connection, volume,
                          get_connector_properties())
        self.driver.delete_volume(volume)
        db.volume_destroy(self.ctxt, volume.id)
    def test_force_terminate_connection_with_empty_hostnqn(self):
        """force=True allows detaching even without a host NQN."""
        InitialConnectorMock.nqn = ""
        InitialConnectorMock.found_discovery_client = True
        self.driver.do_setup(None)
        vol_type = test_utils.create_volume_type(self.ctxt, self,
                                                 name='my_vol_type')
        volume = test_utils.create_volume(self.ctxt, size=4,
                                          volume_type_id=vol_type.id)
        self.driver.create_volume(volume)
        self.driver.terminate_connection(volume, get_connector_properties(),
                                         force=True)
        self.driver.delete_volume(volume)
        db.volume_destroy(self.ctxt, volume.id)
    def test_check_for_setup_error(self):
        """check_for_setup_error passes with a valid NQN and subsystem."""
        InitialConnectorMock.nqn = "hostnqn1"
        InitialConnectorMock.found_discovery_client = True
        self.driver.do_setup(None)
        self.driver.check_for_setup_error()
    def test_check_for_setup_error_no_subsysnqn_should_fail(self):
        """A missing cluster subsystem NQN fails the setup check."""
        InitialConnectorMock.nqn = "hostnqn1"
        InitialConnectorMock.found_discovery_client = True
        self.driver.do_setup(None)
        self.driver.cluster.subsystemNQN = ""
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.driver.check_for_setup_error)
    def test_check_for_setup_error_no_hostnqn_should_fail(self):
        """A missing host NQN fails the setup check."""
        InitialConnectorMock.nqn = ""
        InitialConnectorMock.found_discovery_client = True
        self.driver.do_setup(None)
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.driver.check_for_setup_error)
    def test_check_ip_format(self):
        """IPv4 endpoints are host:port; IPv6 hosts get bracketed."""
        InitialConnectorMock.nqn = ""
        InitialConnectorMock.found_discovery_client = True
        self.driver.do_setup(None)
        host = "1.1.1.1"
        port = 8009
        endpoint = self.driver.cluster._format_endpoint(host, port)
        self.assertEqual("1.1.1.1:8009", endpoint)
        host = "::1111"
        endpoint = self.driver.cluster._format_endpoint(host, port)
        self.assertEqual("[::1111]:8009", endpoint)
    def test_check_for_setup_error_no_dsc_should_succeed(self):
        """A missing discovery client does not fail the setup check."""
        InitialConnectorMock.nqn = "hostnqn1"
        InitialConnectorMock.found_discovery_client = False
        self.driver.do_setup(None)
        self.driver.check_for_setup_error()
    def test_create_clone(self):
        """Cloning an existing volume succeeds."""
        self.driver.do_setup(None)
        vol_type = test_utils.create_volume_type(self.ctxt, self,
                                                 name='my_vol_type')
        volume = test_utils.create_volume(self.ctxt, size=4,
                                          volume_type_id=vol_type.id)
        clone = test_utils.create_volume(self.ctxt, size=4,
                                         volume_type_id=vol_type.id)
        self.driver.create_volume(volume)
        self.driver.create_cloned_volume(clone, volume)
        self.driver.delete_volume(volume)
        self.driver.delete_volume(clone)
        db.volume_destroy(self.ctxt, volume.id)
        db.volume_destroy(self.ctxt, clone.id)
    def test_get_volume_stats(self):
        """get_volume_stats reports the expected backend capabilities."""
        self.driver.do_setup(None)
        # refresh=False before any refresh must return the (empty) cache.
        volumes_data = self.driver.get_volume_stats(refresh=False)
        assert len(volumes_data) == 0, "Expected empty config"
        volumes_data = self.driver.get_volume_stats(refresh=True)
        assert volumes_data['vendor_name'] == 'LightOS Storage', \
            "Expected 'LightOS Storage', received %s" % \
            volumes_data['vendor_name']
        assert volumes_data['volume_backend_name'] == VOLUME_BACKEND_NAME, \
            "Expected %s, received %s" % \
            (VOLUME_BACKEND_NAME, volumes_data['volume_backend_name'])
        assert volumes_data['driver_version'] == self.driver.VERSION, \
            "Expected %s, received %s" % \
            (self.driver.VERSION, volumes_data['driver_version'])
        assert volumes_data['storage_protocol'] == "lightos", \
            "Expected 'lightos', received %s" % \
            volumes_data['storage_protocol']
        assert volumes_data['reserved_percentage'] == RESERVED_PERCENTAGE, \
            "Expected %d, received %s" % \
            (RESERVED_PERCENTAGE, volumes_data['reserved_percentage'])
        assert volumes_data['QoS_support'] is False, \
            "Expected False, received %s" % volumes_data['QoS_support']
        assert volumes_data['online_extend_support'] is True, \
            "Expected True, received %s" % \
            volumes_data['online_extend_support']
        assert volumes_data['thin_provisioning_support'] is True, \
            "Expected True, received %s" % \
            volumes_data['thin_provisioning_support']
        assert volumes_data['compression'] == [True, False], \
            "Expected [True, False], received %s" % volumes_data['compression']
        assert volumes_data['multiattach'] is True, \
            "Expected True, received %s" % volumes_data['multiattach']
        assert volumes_data['free_capacity_gb'] == 'infinite', \
            "Expected 'infinite', received %s" % \
            volumes_data['free_capacity_gb']
|
{
"content_hash": "b85094aef133344e698b180dda4ece58",
"timestamp": "",
"source": "github",
"line_count": 889,
"max_line_length": 79,
"avg_line_length": 44.66591676040495,
"alnum_prop": 0.5692051979449985,
"repo_name": "openstack/cinder",
"id": "35fddb1bbee174421426145c54da4b39913c18b4",
"size": "40353",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "cinder/tests/unit/volume/drivers/lightos/test_lightos_storage.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Jinja",
"bytes": "259"
},
{
"name": "Mako",
"bytes": "976"
},
{
"name": "Python",
"bytes": "25078349"
},
{
"name": "Shell",
"bytes": "6456"
},
{
"name": "Smarty",
"bytes": "67595"
}
],
"symlink_target": ""
}
|
"""
Yelp API v2.0 code sample.
This program demonstrates the capability of the Yelp API version 2.0
by using the Search API to query for businesses by a search term and location,
and the Business API to query additional information about the top result
from the search query.
Please refer to http://www.yelp.com/developers/documentation for the API documentation.
This program requires the Python oauth2 library, which you can install via:
`pip install -r requirements.txt`.
Sample usage of the program:
`python sample.py --term="bars" --location="San Francisco, CA"`
"""
import argparse
import json
import pprint
import sys
import urllib
import urllib2
import pymongo
import oauth2
API_HOST = 'api.yelp.com'
DEFAULT_TERM = 'gastropubs'
DEFAULT_LOCATION = 'San Francisco, CA'
SEARCH_LIMIT = 20
SEARCH_PATH = '/v2/search/'
BUSINESS_PATH = '/v2/business/'
# OAuth credential placeholders that must be filled in by users.
CONSUMER_KEY = "pccrL83Z9i_QfTOXNWdITA"
CONSUMER_SECRET = "I0j3qakoBlVlIqhRXlHNdq9mqrE"
TOKEN = "_OFhCoQiorpb82GPbVG-YAc58j65YVOx"
TOKEN_SECRET = "QyGHeEA2cKO4iKpRsoBsSS4BPX4"
def request(host, path, url_params=None):
    """Prepares OAuth authentication and sends the request to the API.
    Args:
        host (str): The domain host of the API.
        path (str): The path of the API after the domain.
        url_params (dict): An optional set of query parameters in the request.
    Returns:
        dict: The JSON response from the request.
    Raises:
        urllib2.HTTPError: An error occurs from the HTTP request.
    """
    # NOTE: Python 2 code (print statement, urllib/urllib2, oauth2).
    url_params = url_params or {}
    # Path may contain non-ASCII characters, so it is UTF-8 encoded and
    # percent-quoted before being placed in the URL.
    url = 'http://{0}{1}?'.format(host, urllib.quote(path.encode('utf8')))
    consumer = oauth2.Consumer(CONSUMER_KEY, CONSUMER_SECRET)
    oauth_request = oauth2.Request(method="GET", url=url, parameters=url_params)
    # Every request needs a fresh nonce/timestamp pair plus the token and
    # consumer key before it can be signed.
    oauth_request.update(
        {
            'oauth_nonce': oauth2.generate_nonce(),
            'oauth_timestamp': oauth2.generate_timestamp(),
            'oauth_token': TOKEN,
            'oauth_consumer_key': CONSUMER_KEY
        }
    )
    token = oauth2.Token(TOKEN, TOKEN_SECRET)
    # Sign with HMAC-SHA1; the signature is embedded in the resulting URL.
    oauth_request.sign_request(oauth2.SignatureMethod_HMAC_SHA1(), consumer, token)
    signed_url = oauth_request.to_url()
    print u'Querying {0} ...'.format(url)
    conn = urllib2.urlopen(signed_url, None)
    try:
        response = json.loads(conn.read())
    finally:
        # Close the connection even if the body is not valid JSON.
        conn.close()
    return response
def search(term, location):
    """Query the Search API for businesses matching a term and location.

    Args:
        term (str): The search term passed to the API.
        location (str): The search location passed to the API.

    Returns:
        dict: The JSON response from the request.
    """
    # The API expects '+' instead of spaces inside query values.
    params = {
        'term': term.replace(' ', '+'),
        'location': location.replace(' ', '+'),
        'limit': SEARCH_LIMIT,
    }
    return request(API_HOST, SEARCH_PATH, url_params=params)
def get_business(business_id):
    """Query the Business API for a single business by its ID.

    Args:
        business_id (str): The ID of the business to query.

    Returns:
        dict: The JSON response from the request.
    """
    # The business endpoint is simply the base path with the ID appended.
    return request(API_HOST, BUSINESS_PATH + business_id)
def query_api(term, location):
    """Queries the API by the input values from the user.
    Args:
        term (str): The search term to query.
        location (str): The location of the business to query.
    """
    response = search(term, location)
    businesses = response.get('businesses')
    if not businesses:
        print u'No businesses for {0} in {1} found.'.format(term, location)
        return
    # Only the top search result is queried in detail.
    business_id = businesses[0]['id']
    print u'{0} businesses found, querying business info for the top result "{1}" ...'.format(
        len(businesses),
        business_id
    )
    response = get_business(business_id)
    print u'Result for business "{0}" found:'.format(business_id)
    pprint.pprint(response, indent=2)
def main():
    """Parse command line options and run the Yelp API query.

    Exits with an error message if the HTTP request fails.
    """
    # BUG FIX: the original code did `client = pymongo.MongoClient` (no
    # call), so `client('yelp_database')` instantiated MongoClient with the
    # database name as the *host* argument. Instantiate the client properly
    # and access the database by subscription instead.
    client = pymongo.MongoClient()
    db = client['yelp_database']
    parser = argparse.ArgumentParser()
    parser.add_argument('-q', '--term', dest='term', default=DEFAULT_TERM, type=str, help='Search term (default: %(default)s)')
    parser.add_argument('-l', '--location', dest='location', default=DEFAULT_LOCATION, type=str, help='Search location (default: %(default)s)')
    input_values = parser.parse_args()
    try:
        query_api(input_values.term, input_values.location)
    except urllib2.HTTPError as error:
        sys.exit('Encountered HTTP error {0}. Abort program.'.format(error.code))
|
{
"content_hash": "2f208c938db5d0b2da386efe0cc97e76",
"timestamp": "",
"source": "github",
"line_count": 159,
"max_line_length": 143,
"avg_line_length": 29.32704402515723,
"alnum_prop": 0.6635213381942955,
"repo_name": "willyem/yelp-api",
"id": "6f70b1e06c36d44a9b73510505c4416c5e55f278",
"size": "4687",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "v2/python/sample.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "21186"
},
{
"name": "C#",
"bytes": "9497"
},
{
"name": "CSS",
"bytes": "2714"
},
{
"name": "HTML",
"bytes": "13386"
},
{
"name": "Java",
"bytes": "6903"
},
{
"name": "JavaScript",
"bytes": "5751"
},
{
"name": "Objective-C",
"bytes": "98205"
},
{
"name": "PHP",
"bytes": "31799"
},
{
"name": "Perl",
"bytes": "1363"
},
{
"name": "Python",
"bytes": "4687"
},
{
"name": "Ruby",
"bytes": "19599"
},
{
"name": "Shell",
"bytes": "213"
}
],
"symlink_target": ""
}
|
import BoostBuild
t = BoostBuild.Tester(pass_toolset=False)
t.write("input.sss", "")
t.write("Jamroot.jam", """
import type ;
import common ;
import generators ;
import "class" : new ;
import feature : feature ;
import toolset : flags ;
type.register AAA : aaa ;
type.register BBB : bbb ;
type.register CCC : ccc ;
type.register DDD : ddd ;
type.register SSS : sss ;
feature aaa-path : : free path ;
feature bbb-path : : free path ;
class aaa-action : action
{
rule adjust-properties ( property-set )
{
local s = [ $(self.targets[1]).creating-subvariant ] ;
return [ $(property-set).add-raw
[ $(s).implicit-includes aaa-path : AAA ] ] ;
}
}
class aaa-generator : generator
{
rule action-class ( )
{
return aaa-action ;
}
}
class bbb-action : action
{
rule adjust-properties ( property-set )
{
local s = [ $(self.targets[1]).creating-subvariant ] ;
return [ $(property-set).add-raw
[ $(s).implicit-includes bbb-path : BBB ] ] ;
}
}
class bbb-generator : generator
{
rule action-class ( )
{
return bbb-action ;
}
}
generators.register-standard common.copy : SSS : AAA ;
generators.register-standard common.copy : SSS : BBB ;
# Produce two targets from a single source
rule make-aaa-bbb ( project name ? : property-set : sources * )
{
local result ;
local aaa = [ generators.construct $(project) $(name) : AAA :
[ $(property-set).add-raw <location-prefix>a-loc ] : $(sources) ] ;
local bbb = [ generators.construct $(project) $(name) : BBB :
[ $(property-set).add-raw <location-prefix>b-loc ] : $(sources) ] ;
return [ $(aaa[1]).add $(bbb[1]) ] $(aaa[2-]) $(bbb[2-]) ;
}
generate input : input.sss : <generating-rule>@make-aaa-bbb ;
explicit input ;
flags make-ccc AAAPATH : <aaa-path> ;
rule make-ccc ( target : sources * : properties * )
{
ECHO aaa path\: [ on $(target) return $(AAAPATH) ] ;
common.copy $(target) : $(sources) ;
}
flags make-ddd BBBPATH : <bbb-path> ;
rule make-ddd ( target : sources * : properties * )
{
ECHO bbb path\: [ on $(target) return $(BBBPATH) ] ;
common.copy $(target) : $(sources) ;
}
generators.register [ new aaa-generator $(__name__).make-ccc : SSS : CCC ] ;
generators.register [ new bbb-generator $(__name__).make-ddd : SSS : DDD ] ;
# This should have <aaapath>bin/a-loc
ccc output-c : input.sss : <implicit-dependency>input ;
# This should have <bbbpath>bin/b-loc
ddd output-d : input.sss : <implicit-dependency>input ;
""")
t.run_build_system()
t.expect_output_lines(["aaa path: bin/a-loc", "bbb path: bin/b-loc"])
t.cleanup()
|
{
"content_hash": "017a2b2d1451ea6d4fc06aa1232ce726",
"timestamp": "",
"source": "github",
"line_count": 103,
"max_line_length": 76,
"avg_line_length": 25.737864077669904,
"alnum_prop": 0.6212749905695963,
"repo_name": "davehorton/drachtio-server",
"id": "2d22f34087b9874f60d33173022ee77ee4b147a6",
"size": "2964",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "deps/boost_1_77_0/tools/build/test/feature_implicit_dependency.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "662596"
},
{
"name": "Dockerfile",
"bytes": "1330"
},
{
"name": "JavaScript",
"bytes": "60639"
},
{
"name": "M4",
"bytes": "35273"
},
{
"name": "Makefile",
"bytes": "5960"
},
{
"name": "Shell",
"bytes": "47298"
}
],
"symlink_target": ""
}
|
"""
This module can do slight modifications to a wiki page source code such that
the code looks cleaner. The changes are not supposed to change the look of the
rendered wiki page.
The following parameters are supported:
¶ms;
-always Don't prompt you for each replacement. Warning (see below)
has not to be confirmed. ATTENTION: Use this with care!
-summary:XYZ Set the summary message text for the edit to XYZ, bypassing
the predefined message texts with original and replacements
inserted.
All other parameters will be regarded as part of the title of a single page,
and the bot will only work on that single page.
&warning;
For regular use, it is recommended to put this line into your user-config.py:
cosmetic_changes = True
There is another config variable: You can set
cosmetic_changes_mylang_only = False
if you're running a bot on multiple sites and want to do cosmetic changes on
all of them, but be careful if you do.
"""
__version__ = '$Id$'
import pywikibot
import isbn
from pywikibot import pagegenerators, i18n
import sys
import re
warning = """ATTENTION: You can run this script as a stand-alone for testing purposes.
However, the changes are that are made are only minor, and other users
might get angry if you fill the version histories and watchlists with such
irrelevant changes."""
docuReplacements = {
'¶ms;': pagegenerators.parameterHelp,
'&warning;': warning,
}
# Summary message when using this module as a stand-alone script
msg_standalone = 'cosmetic_changes-standalone'
# Summary message that will be appended to the normal message when
# cosmetic changes are made on the fly
msg_append = 'cosmetic_changes-append'
nn_iw_msg = u'<!--interwiki (no, sv, da first; then other languages alphabetically by name)-->'
# This is from interwiki.py;
# move it to family file and implement global instances
moved_links = {
'ca' : (u'ús de la plantilla', u'/ús'),
'cs' : (u'dokumentace', u'/doc'),
'de' : (u'dokumentation', u'/Meta'),
'en' : ([u'documentation',
u'template documentation',
u'template doc',
u'doc',
u'documentation, template'], u'/doc'),
'es' : ([u'documentación', u'documentación de plantilla'], u'/doc'),
'fa' : ([u'documentation',u'توضیحات',u'توضیحات الگو',u'doc'], u'/توضیحات'),
'fr' : (u'/documentation', u'/Documentation'),
'hu' : (u'sablondokumentáció', u'/doc'),
'id' : (u'template doc', u'/doc'),
'ja' : (u'documentation', u'/doc'),
'ka' : (u'თარგის ინფო', u'/ინფო'),
'ko' : (u'documentation', u'/설명문서'),
'ms' : (u'documentation', u'/doc'),
'pl' : (u'dokumentacja', u'/opis'),
'pt' : ([u'documentação', u'/doc'], u'/doc'),
'ro' : (u'documentaţie', u'/doc'),
'ru' : (u'doc', u'/doc'),
'sv' : (u'dokumentation', u'/dok'),
'vi' : (u'documentation', u'/doc'),
'zh' : ([u'documentation', u'doc'], u'/doc'),
}
# Template which should be replaced or removed.
# Use a list with two entries. The first entry will be replaced by the second.
# Examples:
# For removing {{Foo}}, the list must be:
# (u'Foo', None),
#
# The following also works:
# (u'Foo', ''),
#
# For replacing {{Foo}} with {{Bar}} the list must be:
# (u'Foo', u'Bar'),
#
# This also removes all template parameters of {{Foo}}
# For replacing {{Foo}} with {{Bar}} but keep the template
# parameters in its original order, please use:
# (u'Foo', u'Bar\g<parameters>'),
deprecatedTemplates = {
'wikipedia': {
'de': [
(u'Belege', u'Belege fehlen\g<parameters>'),
(u'Quelle', u'Belege fehlen\g<parameters>'),
(u'Quellen', u'Belege fehlen\g<parameters>'),
],
}
}
class CosmeticChangesToolkit:
    """Pipeline of cosmetic (rendering-neutral) clean-ups for wikitext.

    NOTE: Python 2 code (``except X, e`` syntax, ``<>`` operator).
    """
    def __init__(self, site, debug=False, redirect=False, namespace=None, pageTitle=None):
        """Store the page context; namespace decides special handling:
        templates (ns 10) and talk pages (odd namespaces) get fewer changes.
        """
        self.site = site
        self.debug = debug
        self.redirect = redirect
        self.namespace = namespace
        self.template = (self.namespace == 10)
        self.talkpage = self.namespace >= 0 and self.namespace % 2 == 1
        self.title = pageTitle
    def change(self, text):
        """
        Given a wiki source code text, return the cleaned up version.
        """
        oldText = text
        # Commons file description pages get their own normalization first.
        if self.site.sitename()== u'commons:commons' and self.namespace == 6:
            text = self.commonsfiledesc(text)
        text = self.fixSelfInterwiki(text)
        text = self.standardizePageFooter(text)
        text = self.cleanUpLinks(text)
        text = self.cleanUpSectionHeaders(text)
        text = self.putSpacesInLists(text)
        text = self.translateAndCapitalizeNamespaces(text)
        text = self.replaceDeprecatedTemplates(text)
        text = self.resolveHtmlEntities(text)
        text = self.validXhtml(text)
        text = self.removeUselessSpaces(text)
        text = self.removeNonBreakingSpaceBeforePercent(text)
        text = self.fixSyntaxSave(text)
        text = self.fixHtml(text)
        text = self.fixStyle(text)
        text = self.fixTypo(text)
        text = self.fixArabicLetters(text)
        try:
            text = isbn.hyphenateIsbnNumbers(text)
        except isbn.InvalidIsbnException, error:
            # Invalid ISBNs are left as they are.
            pass
        if self.debug:
            pywikibot.showDiff(oldText, text)
        return text
    def fixSelfInterwiki(self, text):
        """
        Interwiki links to the site itself are displayed like local links.
        Remove their language code prefix.
        """
        if not self.talkpage and pywikibot.calledModuleName() <> 'interwiki':
            interwikiR = re.compile(r'\[\[%s\s?:([^\[\]\n]*)\]\]' % self.site.lang)
            text = interwikiR.sub(r'[[\1]]', text)
        return text
    def standardizePageFooter(self, text):
        """
        Makes sure that categories are put to the correct position, but
        does not sort them.
        """
        # The PyWikipediaBot is no longer allowed to touch categories on the German Wikipedia. See http://de.wikipedia.org/wiki/Hilfe_Diskussion:Personendaten/Archiv/bis_2006#Position_der_Personendaten_am_.22Artikelende.22
        if self.site != pywikibot.getSite('de', 'wikipedia') and not self.template:
            categories = pywikibot.getCategoryLinks(text, site = self.site)
            text = pywikibot.replaceCategoryLinks(text, categories, site = self.site)
        return text
    def translateAndCapitalizeNamespaces(self, text):
        """
        Makes sure that localized namespace names are used.
        """
        # arz uses english stylish codes
        if self.site.sitename() == 'wikipedia:arz':
            return text
        family = self.site.family
        # wiki links aren't parsed here.
        exceptions = ['nowiki', 'comment', 'math', 'pre']
        for nsNumber in family.namespaces:
            if not family.isDefinedNSLanguage(nsNumber, self.site.lang):
                # Skip undefined namespaces
                continue
            namespaces = list(family.namespace(self.site.lang, nsNumber, all=True))
            thisNs = namespaces.pop(0)
            if nsNumber == 6 and family.name == 'wikipedia' and \
               self.site.lang in ('en', 'fr'):
                # do not change "Image" on en-wiki and fr-wiki
                for image in [u'Image', u'image']:
                    if image in namespaces:
                        namespaces.remove(image)
            # skip main (article) namespace
            if thisNs and namespaces:
                text = pywikibot.replaceExcept(text, r'\[\[\s*(' + '|'.join(namespaces) + ') *:(?P<nameAndLabel>.*?)\]\]', r'[[' + thisNs + ':\g<nameAndLabel>]]', exceptions)
        return text
    def cleanUpLinks(self, text):
        """Tidy wikilinks: underscores, extra spaces and redundant pipes."""
        # helper function which works on one link and either returns it
        # unmodified, or returns a replacement.
        def handleOneLink(match):
            titleWithSection = match.group('titleWithSection')
            label = match.group('label')
            trailingChars = match.group('linktrail')
            newline = match.group('newline')
            if not self.site.isInterwikiLink(titleWithSection):
                # The link looks like this:
                # [[page_title|link_text]]trailing_chars
                # We only work on namespace 0 because pipes and linktrails work
                # differently for images and categories.
                page = pywikibot.Page(pywikibot.Link(titleWithSection, self.site))
                if page.namespace() == 0:
                    # Replace underlines by spaces, also multiple underlines
                    titleWithSection = re.sub('_+', ' ', titleWithSection)
                    # Remove double spaces
                    titleWithSection = re.sub('  +', ' ', titleWithSection)
                    # Remove unnecessary leading spaces from title,
                    # but remember if we did this because we eventually want
                    # to re-add it outside of the link later.
                    titleLength = len(titleWithSection)
                    titleWithSection = titleWithSection.lstrip()
                    hadLeadingSpaces = (len(titleWithSection) != titleLength)
                    hadTrailingSpaces = False
                    # Remove unnecessary trailing spaces from title,
                    # but remember if we did this because it may affect
                    # the linktrail and because we eventually want to
                    # re-add it outside of the link later.
                    if not trailingChars:
                        titleLength = len(titleWithSection)
                        titleWithSection = titleWithSection.rstrip()
                        hadTrailingSpaces = (len(titleWithSection) != titleLength)
                    # Convert URL-encoded characters to unicode
                    titleWithSection = pywikibot.url2unicode(titleWithSection, site = self.site)
                    if titleWithSection == '':
                        # just skip empty links.
                        return match.group()
                    # Remove unnecessary initial and final spaces from label.
                    # Please note that some editors prefer spaces around pipes. (See [[en:Wikipedia:Semi-bots]]). We remove them anyway.
                    if label is not None:
                        # Remove unnecessary leading spaces from label,
                        # but remember if we did this because we want
                        # to re-add it outside of the link later.
                        labelLength = len(label)
                        label = label.lstrip()
                        hadLeadingSpaces = (len(label) != labelLength)
                        # Remove unnecessary trailing spaces from label,
                        # but remember if we did this because it affects
                        # the linktrail.
                        if not trailingChars:
                            labelLength = len(label)
                            label = label.rstrip()
                            hadTrailingSpaces = (len(label) != labelLength)
                    else:
                        label = titleWithSection
                    if trailingChars:
                        label += trailingChars
                    if titleWithSection == label or titleWithSection[0].lower() + titleWithSection[1:] == label:
                        newLink = "[[%s]]" % label
                    # Check if we can create a link with trailing characters instead of a pipelink
                    elif len(titleWithSection) <= len(label) and label[:len(titleWithSection)] == titleWithSection and re.sub(trailR, '', label[len(titleWithSection):]) == '':
                        newLink = "[[%s]]%s" % (label[:len(titleWithSection)], label[len(titleWithSection):])
                    else:
                        # Try to capitalize the first letter of the title.
                        # Maybe this feature is not useful for languages that
                        # don't capitalize nouns...
                        #if not self.site.nocapitalize:
                        if self.site.sitename() == 'wikipedia:de':
                            titleWithSection = titleWithSection[0].upper() + titleWithSection[1:]
                        newLink = "[[%s|%s]]" % (titleWithSection, label)
                    # re-add spaces that were pulled out of the link.
                    # Examples:
                    #   text[[ title ]]text        -> text [[title]] text
                    #   text[[ title | name ]]text -> text [[title|name]] text
                    #   text[[ title |name]]text   -> text[[title|name]]text
                    #   text[[title| name]]text    -> text [[title|name]]text
                    if hadLeadingSpaces and not newline:
                        newLink = ' ' + newLink
                    if hadTrailingSpaces:
                        newLink = newLink + ' '
                    if newline:
                        newLink = newline + newLink
                    return newLink
            # don't change anything
            return match.group()
        trailR = re.compile(self.site.linktrail())
        # The regular expression which finds links. Results consist of four groups:
        # group title is the target page title, that is, everything before | or ].
        # group section is the page section. It'll include the # to make life easier for us.
        # group label is the alternative link title, that's everything between | and ].
        # group linktrail is the link trail, that's letters after ]] which are part of the word.
        # note that the definition of 'letter' varies from language to language.
        linkR = re.compile(r'(?P<newline>[\n]*)\[\[(?P<titleWithSection>[^\]\|]+)(\|(?P<label>[^\]\|]*))?\]\](?P<linktrail>' + self.site.linktrail() + ')')
        text = pywikibot.replaceExcept(text, linkR, handleOneLink, ['comment', 'math', 'nowiki', 'pre', 'startspace'])
        return text
    def resolveHtmlEntities(self, text):
        """Replace HTML entities with their unicode characters, except for
        characters that are wiki syntax or unsafe in textareas."""
        ignore = [
             38,     # Ampersand (&)
             39,     # Bugzilla 24093
             60,     # Less than (<)
             62,     # Great than (>)
             91,     # Opening bracket - sometimes used intentionally inside links
             93,     # Closing bracket - sometimes used intentionally inside links
            124,     # Vertical bar (??) - used intentionally in navigation bar templates on de:
            160,     # Non-breaking space ( ) - not supported by Firefox textareas
           ]
        # ignore ' see http://eo.wikipedia.org/w/index.php?title=Liberec&diff=next&oldid=2320801
        #if self.site.lang == 'eo':
        #    ignore += [39]
        text = pywikibot.html2unicode(text, ignore = ignore)
        return text
    def validXhtml(self, text):
        """Normalize <br> variants to the XHTML form <br />."""
        text = pywikibot.replaceExcept(text, r'(?i)<br[ /]*>', r'<br />', ['comment', 'math', 'nowiki', 'pre'])
        return text
    def removeUselessSpaces(self, text):
        """Collapse runs of spaces and strip trailing spaces per line."""
        # NOTE(review): 'result' is assigned but never used here.
        result = []
        multipleSpacesR = re.compile('  +')
        spaceAtLineEndR = re.compile(' $')
        exceptions = ['comment', 'math', 'nowiki', 'pre', 'startspace', 'table', 'template']
        text = pywikibot.replaceExcept(text, multipleSpacesR, ' ', exceptions)
        text = pywikibot.replaceExcept(text, spaceAtLineEndR, '', exceptions)
        return text
    def removeNonBreakingSpaceBeforePercent(self, text):
        '''
        Newer MediaWiki versions automatically place a non-breaking space in
        front of a percent sign, so it is no longer required to place it
        manually.
        '''
        text = pywikibot.replaceExcept(text, r'(\d) %', r'\1 %', ['timeline'])
        return text
    def cleanUpSectionHeaders(self, text):
        """
        For better readability of section header source code, puts a space
        between the equal signs and the title.
        Example: ==Section title== becomes == Section title ==
        NOTE: This space is recommended in the syntax help on the English and
        German Wikipedia. It might be that it is not wanted on other wikis.
        If there are any complaints, please file a bug report.
        """
        for level in range(1, 7):
            equals = '=' * level
            text = pywikibot.replaceExcept(text, r'\n' + equals + ' *(?P<title>[^=]+?) *' + equals + ' *\r\n', '\n' + equals + ' \g<title> ' + equals + '\r\n', ['comment', 'math', 'nowiki', 'pre'])
        return text
    def putSpacesInLists(self, text):
        """
        For better readability of bullet list and enumeration wiki source code,
        puts a space between the * or # and the text.
        NOTE: This space is recommended in the syntax help on the English,
        German, and French Wikipedia. It might be that it is not wanted on other
        wikis. If there are any complaints, please file a bug report.
        """
        exceptions = ['comment', 'math', 'nowiki', 'pre', 'source', 'timeline']
        if not (self.redirect or self.template) and \
           pywikibot.calledModuleName() != 'capitalize_redirects':
            text = pywikibot.replaceExcept(
                text,
                r'(?m)^(?P<bullet>[:;]*(\*+|#+)[:;\*#]*)(?P<char>[^\s\*#:;].+?)', '\g<bullet> \g<char>',
                exceptions)
        return text
    def replaceDeprecatedTemplates(self, text):
        """Replace or remove templates listed in deprecatedTemplates for
        this family/language."""
        exceptions = ['comment', 'math', 'nowiki', 'pre']
        if self.site.family.name in deprecatedTemplates and self.site.lang in deprecatedTemplates[self.site.family.name]:
            for template in deprecatedTemplates[self.site.family.name][self.site.lang]:
                old = template[0]
                new = template[1]
                if new == None:
                    new = ''
                else:
                    new = '{{'+new+'}}'
                if not self.site.nocapitalize:
                    # Match both upper- and lower-case first letter.
                    old = '[' + old[0].upper() + old[0].lower() + ']' + old[1:]
                text = pywikibot.replaceExcept(text, r'\{\{([mM][sS][gG]:)?' + old + '(?P<parameters>\|[^}]+|)}}', new, exceptions)
        return text
    #from fixes.py
    def fixSyntaxSave(self, text):
        """Fix common external-link syntax mistakes (double brackets, dashes)."""
        exceptions = ['nowiki', 'comment', 'math', 'pre', 'source', 'startspace']
        # external link in double brackets
        text = pywikibot.replaceExcept(text, r'\[\[(?P<url>https?://[^\]]+?)\]\]', r'[\g<url>]', exceptions)
        # external link starting with double bracket
        text = pywikibot.replaceExcept(text, r'\[\[(?P<url>https?://.+?)\]', r'[\g<url>]', exceptions)
        # external link and description separated by a dash, with
        # whitespace in front of the dash, so that it is clear that
        # the dash is not a legitimate part of the URL.
        text = pywikibot.replaceExcept(text, r'\[(?P<url>https?://[^\|\] \r\n]+?) +\| *(?P<label>[^\|\]]+?)\]', r'[\g<url> \g<label>]', exceptions)
        # dash in external link, where the correct end of the URL can
        # be detected from the file extension. It is very unlikely that
        # this will cause mistakes.
        text = pywikibot.replaceExcept(text, r'\[(?P<url>https?://[^\|\] ]+?(\.pdf|\.html|\.htm|\.php|\.asp|\.aspx|\.jsp)) *\| *(?P<label>[^\|\]]+?)\]', r'[\g<url> \g<label>]', exceptions)
        return text
    def fixHtml(self, text):
        """Convert simple HTML markup (<b>, <i>, <hr>, headers) to wiki syntax
        or XHTML-compliant forms."""
        # Everything case-insensitive (?i)
        # Keep in mind that MediaWiki automatically converts <br> to <br />
        exceptions = ['nowiki', 'comment', 'math', 'pre', 'source', 'startspace']
        text = pywikibot.replaceExcept(text, r'(?i)<b>(.*?)</b>', r"'''\1'''" , exceptions)
        text = pywikibot.replaceExcept(text, r'(?i)<strong>(.*?)</strong>', r"'''\1'''" , exceptions)
        text = pywikibot.replaceExcept(text, r'(?i)<i>(.*?)</i>', r"''\1''" , exceptions)
        text = pywikibot.replaceExcept(text, r'(?i)<em>(.*?)</em>', r"''\1''" , exceptions)
        # horizontal line without attributes in a single line
        text = pywikibot.replaceExcept(text, r'(?i)([\r\n])<hr[ /]*>([\r\n])', r'\1----\2', exceptions)
        # horizontal line with attributes; can't be done with wiki syntax
        # so we only make it XHTML compliant
        text = pywikibot.replaceExcept(text, r'(?i)<hr ([^>/]+?)>', r'<hr \1 />', exceptions)
        # a header where only spaces are in the same line
        for level in range(1, 7):
            equals = '\\1%s \\2 %s\\3' % ("="*level, "="*level)
            text = pywikibot.replaceExcept(text,
                                           r'(?i)([\r\n]) *<h%d> *([^<]+?) *</h%d> *([\r\n])'%(level, level),
                                           r'%s'%equals, exceptions)
        #remove empty <ref/>-tag
        text = pywikibot.replaceExcept(text, r'(?i)<ref\s*/>', r'', exceptions)
        # TODO: maybe we can make the bot replace <p> tags with \r\n's.
        return text
    def fixStyle(self, text):
        """Replace the deprecated 'prettytable' CSS class with 'wikitable'."""
        exceptions = ['nowiki', 'comment', 'math', 'pre', 'source', 'startspace']
        # convert prettytable to wikitable class
        # NOTE(review): the rest of this class uses self.site.lang; here
        # self.site.language is compared without calling it — if language is a
        # method this condition is always False. Verify against the site API.
        if self.site.language in ('de', 'en'):
           text = pywikibot.replaceExcept(text, ur'(class="[^"]*)prettytable([^"]*")', ur'\1wikitable\2', exceptions)
        return text
    def fixTypo(self, text):
        """Fix frequent unit/symbol typos (ccm -> cm³, º -> °)."""
        exceptions = ['nowiki', 'comment', 'math', 'pre', 'source', 'startspace', 'gallery', 'hyperlink', 'interwiki', 'link']
        # change <number> ccm -> <number> cm³
        text = pywikibot.replaceExcept(text, ur'(\d)\s* ccm', ur'\1 cm³', exceptions)
        text = pywikibot.replaceExcept(text, ur'(\d)\s*ccm', ur'\1 cm³', exceptions)
        # Solve wrong Nº sign with °C or °F
        # additional exception requested on fr-wiki for this stuff
        pattern = re.compile(u'«.*?»', re.UNICODE)
        exceptions.append(pattern)
        text = pywikibot.replaceExcept(text, ur'(\d)\s* [º°]([CF])', ur'\1 °\2', exceptions)
        text = pywikibot.replaceExcept(text, ur'(\d)\s*[º°]([CF])', ur'\1 °\2', exceptions)
        text = pywikibot.replaceExcept(text, ur'º([CF])', ur'°\1', exceptions)
        return text
    def fixArabicLetters(self, text):
        """Normalize Arabic-script punctuation, letters and digits on
        ckb/fa wikis."""
        if self.site.lang=='ckb' or self.site.lang=='fa':
            exceptions = [
                'gallery',
                'hyperlink',
                'interwiki',
                # but changes letters inside wikilinks
                #'link',
                'math',
                'pre',
                'template',
                'timeline',
                'ref',
                'source',
                'startspace',
                'inputbox',
            ]
            # do not change inside file links
            namespaces = list(self.site.namespace(6, all = True))
            pattern = re.compile(u'\[\[(' + '|'.join(namespaces) + '):.+?\..+?\]\]',
                                 re.UNICODE)
            exceptions.append(pattern)
            text = pywikibot.replaceExcept(text, u',', u'،', exceptions)
            if self.site.lang=='ckb':
                text = pywikibot.replaceExcept(text,
                                               ur'ه([.،_<\]\s])',
                                               ur'ە\1', exceptions)
                text = pywikibot.replaceExcept(text, u'ه', u'ە', exceptions)
                text = pywikibot.replaceExcept(text, u'ه', u'ھ', exceptions)
            text = pywikibot.replaceExcept(text, u'ك', u'ک', exceptions)
            text = pywikibot.replaceExcept(text, ur'[ىي]', u'ی', exceptions)
            # replace persian digits
            for i in range(0,10):
                if self.site.lang=='ckb':
                    text = pywikibot.replaceExcept(text,
                                                   u'۰۱۲۳۴۵۶۷۸۹'[i],
                                                   u'٠١٢٣٤٥٦٧٨٩'[i], exceptions)
                else:
                    text = pywikibot.replaceExcept(text,
                                                   u'٠١٢٣٤٥٦٧٨٩'[i],
                                                   u'۰۱۲۳۴۵۶۷۸۹'[i], exceptions)
            # do not change digits in class, style and table params
            pattern = re.compile(u'=".*?"', re.UNICODE)
            exceptions.append(pattern)
            # do not change digits inside html-tags
            pattern = re.compile(u'<[/]*?[^</]+?[/]*?>', re.UNICODE)
            exceptions.append(pattern)
            exceptions.append('table') #exclude tables for now
            for i in range(0,10):
                if self.site.lang=='ckb':
                    text = pywikibot.replaceExcept(text, str(i),
                                                   u'٠١٢٣٤٥٦٧٨٩'[i], exceptions)
                else:
                    text = pywikibot.replaceExcept(text, str(i),
                                                   u'۰۱۲۳۴۵۶۷۸۹'[i], exceptions)
        return text
    # Retrieved from "http://commons.wikimedia.org/wiki/Commons:Tools/pywiki_file_description_cleanup"
    def commonsfiledesc(self, text):
        """Normalize Commons file description pages ({{int:}} headers,
        common field values, upload-form leftovers)."""
        # section headers to {{int:}} versions
        exceptions = ['comment', 'includeonly', 'math', 'noinclude', 'nowiki',
                      'pre', 'source', 'ref', 'timeline']
        text = pywikibot.replaceExcept(text,
                                       r"([\r\n]|^)\=\= *Summary *\=\=",
                                       r"\1== {{int:filedesc}} ==",
                                       exceptions, True)
        text = pywikibot.replaceExcept(
            text,
            r"([\r\n])\=\= *\[\[Commons:Copyright tags\|Licensing\]\]: *\=\=",
            r"\1== {{int:license}} ==", exceptions, True)
        text = pywikibot.replaceExcept(
            text,
            r"([\r\n])\=\= *(Licensing|License information|{{int:license-header}}) *\=\=",
            r"\1== {{int:license}} ==", exceptions, True)
        # frequent field values to {{int:}} versions
        text = pywikibot.replaceExcept(
            text,
            r'([\r\n]\|[Ss]ource *\= *)(?:[Oo]wn work by uploader|[Oo]wn work|[Ee]igene [Aa]rbeit) *([\r\n])',
            r'\1{{own}}\2', exceptions, True)
        text = pywikibot.replaceExcept(
            text,
            r'(\| *Permission *\=) *(?:[Ss]ee below|[Ss]iehe unten) *([\r\n])',
            r'\1\2', exceptions, True)
        # added to transwikied pages
        text = pywikibot.replaceExcept(text, r'__NOTOC__', '', exceptions, True)
        # tracker element for js upload form
        text = pywikibot.replaceExcept(
            text,
            r'<!-- *{{ImageUpload\|(?:full|basic)}} *-->',
            '', exceptions[1:], True)
        text = pywikibot.replaceExcept(text, r'{{ImageUpload\|(?:basic|full)}}',
                                       '', exceptions, True)
        # duplicated section headers
        text = pywikibot.replaceExcept(
            text,
            r'([\r\n]|^)\=\= *{{int:filedesc}} *\=\=(?:[\r\n ]*)\=\= *{{int:filedesc}} *\=\=',
            r'\1== {{int:filedesc}} ==', exceptions, True)
        text = pywikibot.replaceExcept(
            text,
            r'([\r\n]|^)\=\= *{{int:license}} *\=\=(?:[\r\n ]*)\=\= *{{int:license}} *\=\=',
            r'\1== {{int:license}} ==', exceptions, True)
        return text
class CosmeticChangesBot:
    """Interactive driver: runs CosmeticChangesToolkit over generated pages."""
    def __init__(self, generator, acceptall = False,
                 comment=u'Robot: Cosmetic changes'):
        self.generator = generator
        self.acceptall = acceptall  # True: save every change without asking
        self.comment = comment
        self.done = False           # set when the user chooses to quit
    def treat(self, page):
        """Clean up one page and (optionally after confirmation) save it."""
        try:
            # Show the title of the page we're working on.
            # Highlight the title in purple.
            pywikibot.output(u"\n\n>>> \03{lightpurple}%s\03{default} <<<"
                             % page.title())
            ccToolkit = CosmeticChangesToolkit(page.site, debug=True,
                                               namespace=page.namespace(),
                                               pageTitle=page.title())
            changedText = ccToolkit.change(page.get())
            # Only save when something other than surrounding whitespace changed.
            if changedText.strip() != page.get().strip():
                if not self.acceptall:
                    choice = pywikibot.inputChoice(
                        u'Do you want to accept these changes?',
                        ['Yes', 'No', 'All', 'Quit'], ['y', 'N', 'a', 'q'], 'N')
                    if choice == 'a':
                        self.acceptall = True
                    elif choice == 'q':
                        self.done = True
                        return
                if self.acceptall or choice == 'y':
                    page.put(changedText, comment=self.comment)
            else:
                pywikibot.output('No changes were necessary in %s'
                                 % page.title())
        except pywikibot.NoPage:
            pywikibot.output("Page %s does not exist?!"
                             % page.title(asLink=True))
        except pywikibot.IsRedirectPage:
            pywikibot.output("Page %s is a redirect; skipping."
                             % page.title(asLink=True))
        except pywikibot.LockedPage:
            pywikibot.output("Page %s is locked?!" % page.title(asLink=True))
        except pywikibot.EditConflict:
            pywikibot.output("An edit conflict has occured at %s."
                             % page.title(asLink=True))
    def run(self):
        """Iterate over the generator until exhausted or the user quits."""
        try:
            for page in self.generator:
                if self.done: break
                self.treat(page)
        except KeyboardInterrupt:
            raise
            #pywikibot.output('\nQuitting program...')
def main():
    """Parse command line arguments and run the cosmetic-changes bot."""
    #page generator
    gen = None
    pageTitle = []
    editSummary = ''
    answer = 'y'
    always = False
    # This factory is responsible for processing command line arguments
    # that are also used by other scripts and that determine on which pages
    # to work on.
    genFactory = pagegenerators.GeneratorFactory()
    for arg in pywikibot.handleArgs():
        if arg.startswith('-summary:'):
            editSummary = arg[len('-summary:'):]
        elif arg == '-always':
            always = True
        elif not genFactory.handleArg(arg):
            # Anything unrecognized is treated as part of a page title.
            pageTitle.append(arg)
    if editSummary == '':
        # Load default summary message.
        editSummary = i18n.twtranslate(pywikibot.getSite(), msg_standalone)
    if pageTitle:
        site = pywikibot.getSite()
        gen = iter([pywikibot.Page(pywikibot.Link(t, site)) for t in pageTitle])
    if not gen:
        gen = genFactory.getCombinedGenerator()
    if not gen:
        pywikibot.showHelp()
    else:
        # Unless -always was given, require explicit confirmation first.
        if not always:
            answer = pywikibot.inputChoice(
                warning + '\nDo you really want to continue?',
                ['yes', 'no'], ['y', 'N'], 'N')
        if answer == 'y':
            preloadingGen = pagegenerators.PreloadingGenerator(gen)
            bot = CosmeticChangesBot(preloadingGen, acceptall=always,
                                     comment=editSummary)
            bot.run()
if __name__ == "__main__":
    try:
        main()
    finally:
        # pywikibot.stopme() runs even if main() raises (try/finally).
        pywikibot.stopme()
|
{
"content_hash": "03e70bb6e0184726de6d4dd02fcf0623",
"timestamp": "",
"source": "github",
"line_count": 667,
"max_line_length": 222,
"avg_line_length": 46.78110944527736,
"alnum_prop": 0.5424157933532032,
"repo_name": "azatoth/pywikipedia",
"id": "ee371c44ce10eb0f01b8809b2e96867edcd92725",
"size": "31389",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scripts/cosmetic_changes.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1715902"
}
],
"symlink_target": ""
}
|
"""Config flow for Bond integration."""
from __future__ import annotations
from http import HTTPStatus
import logging
from typing import Any
from aiohttp import ClientConnectionError, ClientResponseError
from bond_api import Bond
import voluptuous as vol
from homeassistant import config_entries, exceptions
from homeassistant.config_entries import ConfigEntryState
from homeassistant.const import CONF_ACCESS_TOKEN, CONF_HOST, CONF_NAME
from homeassistant.core import HomeAssistant
from homeassistant.data_entry_flow import AbortFlow, FlowResult
from homeassistant.helpers.aiohttp_client import async_get_clientsession
from homeassistant.helpers.typing import DiscoveryInfoType
from .const import DOMAIN
from .utils import BondHub
_LOGGER = logging.getLogger(__name__)
# Manual setup form: the user must supply both the host and the token.
USER_SCHEMA = vol.Schema(
    {vol.Required(CONF_HOST): str, vol.Required(CONF_ACCESS_TOKEN): str}
)
# Discovered hub without a usable token: only the token is asked for.
DISCOVERY_SCHEMA = vol.Schema({vol.Required(CONF_ACCESS_TOKEN): str})
# Discovered hub whose token was fetched automatically: nothing left to ask.
TOKEN_SCHEMA = vol.Schema({})
async def async_get_token(hass: HomeAssistant, host: str) -> str | None:
    """Ask the Bond hub at *host* for its unauthenticated setup token.

    Returns the token string, or ``None`` when the device cannot be reached
    or its response carries no ``token`` key.
    """
    client = Bond(host, "", session=async_get_clientsession(hass))
    try:
        payload: dict[str, str] = await client.token()
    except ClientConnectionError:
        return None
    return payload.get("token")
async def _validate_input(hass: HomeAssistant, data: dict[str, Any]) -> tuple[str, str]:
    """Connect to the hub described by *data* and return ``(bond_id, name)``.

    Raises ``InputValidationError`` carrying a translation key describing
    why the supplied host/token pair cannot be used.
    """
    bond = Bond(
        data[CONF_HOST], data[CONF_ACCESS_TOKEN], session=async_get_clientsession(hass)
    )
    try:
        hub = BondHub(bond)
        await hub.setup(max_devices=1)
    except ClientConnectionError as error:
        raise InputValidationError("cannot_connect") from error
    except ClientResponseError as error:
        # 401 means the token is wrong; anything else is unexpected.
        base = "invalid_auth" if error.status == HTTPStatus.UNAUTHORIZED else "unknown"
        raise InputValidationError(base) from error
    except Exception as error:
        _LOGGER.exception("Unexpected exception")
        raise InputValidationError("unknown") from error
    # Hubs on very old firmware do not report an ID usable as unique_id.
    if not hub.bond_id:
        raise InputValidationError("old_firmware")
    return hub.bond_id, hub.name
class ConfigFlow(config_entries.ConfigFlow, domain=DOMAIN):
    """Handle a config flow for Bond."""
    VERSION = 1
    def __init__(self) -> None:
        """Initialize config flow."""
        # Accumulates host/name/token learned during zeroconf discovery.
        self._discovered: dict[str, str] = {}
    async def _async_try_automatic_configure(self) -> None:
        """Try to auto configure the device.
        Failure is acceptable here since the device may have been
        online longer then the allowed setup period, and we will
        instead ask them to manually enter the token.
        """
        host = self._discovered[CONF_HOST]
        if not (token := await async_get_token(self.hass, host)):
            return
        self._discovered[CONF_ACCESS_TOKEN] = token
        # Validate immediately so the confirm step can skip the manual
        # token form when this token already works.
        _, hub_name = await _validate_input(self.hass, self._discovered)
        self._discovered[CONF_NAME] = hub_name
    async def async_step_zeroconf(
        self, discovery_info: DiscoveryInfoType
    ) -> FlowResult:
        """Handle a flow initialized by zeroconf discovery."""
        name: str = discovery_info[CONF_NAME]
        host: str = discovery_info[CONF_HOST]
        # The first label of the mDNS name is the hub's unique Bond ID.
        bond_id = name.partition(".")[0]
        await self.async_set_unique_id(bond_id)
        for entry in self._async_current_entries():
            if entry.unique_id != bond_id:
                continue
            # Hub already configured: refresh the stored host, and if the
            # entry failed to set up, also try to fetch a fresh token.
            updates = {CONF_HOST: host}
            if entry.state == ConfigEntryState.SETUP_ERROR and (
                token := await async_get_token(self.hass, host)
            ):
                updates[CONF_ACCESS_TOKEN] = token
            new_data = {**entry.data, **updates}
            if new_data != dict(entry.data):
                # Only write + reload when something actually changed.
                self.hass.config_entries.async_update_entry(
                    entry, data={**entry.data, **updates}
                )
                self.hass.async_create_task(
                    self.hass.config_entries.async_reload(entry.entry_id)
                )
            raise AbortFlow("already_configured")
        self._discovered = {CONF_HOST: host, CONF_NAME: bond_id}
        await self._async_try_automatic_configure()
        # Shown in the discovery card while the flow is pending.
        self.context.update(
            {
                "title_placeholders": {
                    CONF_HOST: self._discovered[CONF_HOST],
                    CONF_NAME: self._discovered[CONF_NAME],
                }
            }
        )
        return await self.async_step_confirm()
    async def async_step_confirm(
        self, user_input: dict[str, Any] | None = None
    ) -> FlowResult:
        """Handle confirmation flow for discovered bond hub."""
        errors = {}
        if user_input is not None:
            if CONF_ACCESS_TOKEN in self._discovered:
                # Token was auto-fetched and already validated in
                # _async_try_automatic_configure: create the entry directly.
                return self.async_create_entry(
                    title=self._discovered[CONF_NAME],
                    data={
                        CONF_ACCESS_TOKEN: self._discovered[CONF_ACCESS_TOKEN],
                        CONF_HOST: self._discovered[CONF_HOST],
                    },
                )
            # Otherwise the user typed a token; validate it now.
            data = {
                CONF_ACCESS_TOKEN: user_input[CONF_ACCESS_TOKEN],
                CONF_HOST: self._discovered[CONF_HOST],
            }
            try:
                _, hub_name = await _validate_input(self.hass, data)
            except InputValidationError as error:
                errors["base"] = error.base
            else:
                return self.async_create_entry(
                    title=hub_name,
                    data=data,
                )
        # Empty schema (just a confirm button) when we already hold a token;
        # otherwise ask for the token.
        if CONF_ACCESS_TOKEN in self._discovered:
            data_schema = TOKEN_SCHEMA
        else:
            data_schema = DISCOVERY_SCHEMA
        return self.async_show_form(
            step_id="confirm",
            data_schema=data_schema,
            errors=errors,
            description_placeholders=self._discovered,
        )
    async def async_step_user(
        self, user_input: dict[str, Any] | None = None
    ) -> FlowResult:
        """Handle a flow initialized by the user."""
        errors = {}
        if user_input is not None:
            try:
                bond_id, hub_name = await _validate_input(self.hass, user_input)
            except InputValidationError as error:
                errors["base"] = error.base
            else:
                await self.async_set_unique_id(bond_id)
                self._abort_if_unique_id_configured()
                return self.async_create_entry(title=hub_name, data=user_input)
        return self.async_show_form(
            step_id="user", data_schema=USER_SCHEMA, errors=errors
        )
class InputValidationError(exceptions.HomeAssistantError):
    """Error to indicate we cannot proceed due to invalid input."""
    def __init__(self, base: str) -> None:
        """Initialize with error base."""
        super().__init__()
        # Translation key shown in the form (e.g. "cannot_connect").
        self.base = base
|
{
"content_hash": "e17918936cae3aa338807aa8c6dc75bd",
"timestamp": "",
"source": "github",
"line_count": 200,
"max_line_length": 88,
"avg_line_length": 35.53,
"alnum_prop": 0.6020264565156206,
"repo_name": "aronsky/home-assistant",
"id": "6f70d37e0a1a3d15586d431a25bcaf9bebe640ea",
"size": "7106",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "homeassistant/components/bond/config_flow.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2443"
},
{
"name": "Python",
"bytes": "38448521"
},
{
"name": "Shell",
"bytes": "4910"
}
],
"symlink_target": ""
}
|
__source__ = 'https://leetcode.com/problems/complex-number-multiplication/'
# Time: O(1)
# Space: O(1)
#
# Description: Leetcode # 537. Complex Number Multiplication
#
# Given two strings representing two complex numbers.
# https://en.wikipedia.org/wiki/Complex_number
#
# A complex number is a number that can be expressed in the form a + bi,
# where a and b are real numbers and i is the imaginary unit
#
# You need to return a string representing their multiplication. Note i2 = -1 according to the definition.
#
# Example 1:
# Input: "1+1i", "1+1i"
# Output: "0+2i"
# Explanation: (1 + i) * (1 + i) = 1 + i2 + 2 * i = 2i, and you need convert it to the form of 0+2i.
# Example 2:
# Input: "1+-1i", "1+-1i"
# Output: "0+-2i"
# Explanation: (1 - i) * (1 - i) = 1 + i2 - 2 * i = -2i, and you need convert it to the form of 0+-2i.
# Note:
#
# The input strings will not have extra blank.
# The input strings will be given in the form of a+bi,
# where the integer a and b will both belong to the range of [-100, 100]. And the output should be also in this form.
# Hide Company Tags Amazon
# Hide Tags Math String
import unittest
# 20ms 98.88%
class Solution(object):
    def complexNumberMultiply(self, a, b):
        """Multiply two complex numbers given as "<real>+<imag>i" strings.

        :type a: str
        :type b: str
        :rtype: str  # product, formatted as "<real>+<imag>i"
        """
        # Drop the trailing 'i' and split on the separating '+'; negative
        # components like "1+-1i" still parse because only the separator
        # '+' sits between the two fields.
        real_a, _, imag_a = a[:-1].partition('+')
        real_b, _, imag_b = b[:-1].partition('+')
        ra, ia = int(real_a), int(imag_a)
        rb, ib = int(real_b), int(imag_b)
        # (ra + ia*i) * (rb + ib*i) with i^2 = -1
        real = ra * rb - ia * ib
        imag = ra * ib + ia * rb
        return '%d+%di' % (real, imag)
class FooTest(unittest.TestCase):
    """Minimal unittest scaffold used to sanity-check this module."""
    @classmethod
    def setUpClass(cls):
        pass
    @classmethod
    def tearDownClass(cls):
        pass
    # NOTE(review): unittest only calls "setUp"; "setupDown" is never
    # invoked by the framework -- presumably a typo. Confirm before renaming.
    def setupDown(self):
        pass
    def tearDown(self):
        pass
    def test_foo(self):
        # Trivial assertion: proves the test harness itself runs.
        self.assertEqual(1, 1)
if __name__ == '__main__':
    # Entry point: discover and run the tests defined in this module.
    # run all tests
    unittest.main()
    # run one test
    #unittest.main(defaultTest='FooTest.test_foo', warnings='ignore')
Java = '''
# Thought: https://leetcode.com/problems/complex-number-multiplication/solution/
Java 3-liner
This solution relies on the fact that (a+bi)(c+di) = (ac - bd) + (ad+bc)i.
Approach #1 Simple Solution[Accepted]
Complexity Analysis
Time complexity : O(1). Here splitting takes constant time as length of the string is very small (<20).
Space complexity : O(1). Constant extra space is used.
# 7ms 34.32%
public class Solution {
public String complexNumberMultiply(String a, String b) {
String x[] = a.split("\\+|i");
String y[] = b.split("\\+|i");
int a_real = Integer.parseInt(x[0]);
int a_img = Integer.parseInt(x[1]);
int b_real = Integer.parseInt(y[0]);
int b_img = Integer.parseInt(y[1]);
return (a_real * b_real - a_img * b_img) + "+" + (a_real * b_img + a_img * b_real) + "i";
}
}
# 51ms 3.37%
public class Solution {
public String complexNumberMultiply(String a, String b) {
int[] coefs1 = Stream.of(a.split("\\+|i")).mapToInt(Integer::parseInt).toArray(),
coefs2 = Stream.of(b.split("\\+|i")).mapToInt(Integer::parseInt).toArray();
return (coefs1[0]*coefs2[0] - coefs1[1]*coefs2[1]) + "+"
+ (coefs1[0]*coefs2[1] + coefs1[1]*coefs2[0]) + "i";
}
}
# 2ms 100%
class Solution {
String[] split(String a) {
String[] ans = new String[2];
for (int i = 0; i < a.length(); i++) {
if (a.charAt(i) == '+') {
ans[0] = a.substring(0, i);
ans[1] = a.substring(i + 1, a.length() - 1);
break;
}
}
return ans;
}
public String complexNumberMultiply(String a, String b) {
String[] str = split(a);
int al = Integer.valueOf(str[0]), ar = Integer.valueOf(str[1]);
str = split(b);
int bl = Integer.valueOf(str[0]), br = Integer.valueOf(str[1]);
int l = al * bl - ar * br;
int r = al * br + ar * bl;
StringBuilder sb = new StringBuilder();
sb.append(l);
sb.append("+");
sb.append(r);
sb.append("i");
return sb.toString();
}
}
'''
|
{
"content_hash": "c1143289ca99b8628463334b00e99aa5",
"timestamp": "",
"source": "github",
"line_count": 136,
"max_line_length": 117,
"avg_line_length": 29.897058823529413,
"alnum_prop": 0.5757501229709788,
"repo_name": "JulyKikuAkita/PythonPrac",
"id": "7c9c72816505280ea4f8e539d5530b21b02c41f1",
"size": "4066",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cs15211/ComplexNumberMultiplication.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "DIGITAL Command Language",
"bytes": "191608"
},
{
"name": "HTML",
"bytes": "647778"
},
{
"name": "Python",
"bytes": "5429558"
}
],
"symlink_target": ""
}
|
import os, sys, subprocess, difflib
from scripts.test.support import run_command, split_wast
# Regenerate asm2wasm expectation files: every .asm.js test is converted in
# each trap-handling mode (imprecise / precise / clamp) both with and
# without optimization, and the output is written next to the input.
print '[ processing and updating testcases... ]\n'
for asm in sorted(os.listdir('test')):
  if asm.endswith('.asm.js'):
    for precise in [0, 1, 2]:
      for opts in [1, 0]:
        cmd = [os.path.join('bin', 'asm2wasm'), os.path.join('test', asm)]
        wasm = asm.replace('.asm.js', '.fromasm')
        if not precise:
          cmd += ['--emit-potential-traps', '--ignore-implicit-traps']
          wasm += '.imprecise'
        elif precise == 2:
          cmd += ['--emit-clamped-potential-traps']
          wasm += '.clamp'
        if not opts:
          wasm += '.no-opts'
          if precise:
            cmd += ['-O0'] # test that -O0 does nothing
        else:
          cmd += ['-O']
        if 'debugInfo' in asm:
          cmd += ['-g']
        if precise and opts:
          # test mem init importing
          open('a.mem', 'wb').write(asm)
          cmd += ['--mem-init=a.mem']
          if asm[0] == 'e':
            cmd += ['--mem-base=1024']
        if 'i64' in asm or 'wasm-only' in asm:
          cmd += ['--wasm-only']
        print ' '.join(cmd)
        actual = run_command(cmd)
        # Overwrite the checked-in expectation with the fresh output.
        with open(os.path.join('test', wasm), 'w') as o: o.write(actual)
# Regenerate s2wasm expectations for the LLVM .s assembly testcases.
for dot_s_dir in ['dot_s', 'llvm_autogenerated']:
  for s in sorted(os.listdir(os.path.join('test', dot_s_dir))):
    if not s.endswith('.s'): continue
    print '..', s
    wasm = s.replace('.s', '.wast')
    full = os.path.join('test', dot_s_dir, s)
    # The autogenerated suite needs an explicit stack allocation.
    stack_alloc = ['--allocate-stack=1024'] if dot_s_dir == 'llvm_autogenerated' else []
    cmd = [os.path.join('bin', 's2wasm'), full, '--emscripten-glue'] + stack_alloc
    if s.startswith('start_'):
      cmd.append('--start')
    actual = run_command(cmd, stderr=subprocess.PIPE, expected_err='')
    expected_file = os.path.join('test', dot_s_dir, wasm)
    with open(expected_file, 'w') as o: o.write(actual)
# Disabled: wasm2asm round-trip updating, kept as a bare string literal
# (i.e. commented out) so it is easy to re-enable later.
'''
for wasm in ['address.wast']:#os.listdir(os.path.join('test', 'spec')):
  if wasm.endswith('.wast'):
    print '..', wasm
    asm = wasm.replace('.wast', '.2asm.js')
    proc = subprocess.Popen([os.path.join('bin', 'wasm2asm'), os.path.join('test', 'spec', wasm)], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    actual, err = proc.communicate()
    assert proc.returncode == 0, err
    assert err == '', 'bad err:' + err
    expected_file = os.path.join('test', asm)
    open(expected_file, 'w').write(actual)
'''
# Regenerate printing expectations (normal and minified) for test/print.
for t in sorted(os.listdir(os.path.join('test', 'print'))):
  if t.endswith('.wast'):
    print '..', t
    wasm = os.path.basename(t).replace('.wast', '')
    cmd = [os.path.join('bin', 'wasm-shell'), os.path.join('test', 'print', t), '--print']
    print ' ', ' '.join(cmd)
    actual, err = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate()
    with open(os.path.join('test', 'print', wasm + '.txt'), 'w') as o: o.write(actual)
    cmd = [os.path.join('bin', 'wasm-shell'), os.path.join('test', 'print', t), '--print-minified']
    print ' ', ' '.join(cmd)
    actual, err = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate()
    with open(os.path.join('test', 'print', wasm + '.minified.txt'), 'w') as o: o.write(actual)
# Regenerate per-pass expectations: the pass list is encoded in the file
# name ("O" files run -O, otherwise underscores separate pass names).
for t in sorted(os.listdir(os.path.join('test', 'passes'))):
  if t.endswith('.wast'):
    print '..', t
    passname = os.path.basename(t).replace('.wast', '')
    opts = ['-' + passname] if passname.startswith('O') else ['--' + p for p in passname.split('_')]
    t = os.path.join('test', 'passes', t)
    actual = ''
    for module, asserts in split_wast(t):
      assert len(asserts) == 0
      with open('split.wast', 'w') as o: o.write(module)
      cmd = [os.path.join('bin', 'wasm-opt')] + opts + ['split.wast', '--print']
      actual += run_command(cmd)
    with open(os.path.join('test', 'passes', passname + '.txt'), 'w') as o: o.write(actual)
# Round-trip hello_world through wasm-opt's -o/-S notation and write the
# result back over the input.
print '\n[ checking wasm-opt -o notation... ]\n'
wast = os.path.join('test', 'hello_world.wast')
cmd = [os.path.join('bin', 'wasm-opt'), wast, '-o', 'a.wast', '-S']
run_command(cmd)
open(wast, 'w').write(open('a.wast').read())
# Regenerate binary-format expectations: assemble each .wast to binary,
# disassemble it back, and store the text (with and without debug info).
print '\n[ checking binary format testcases... ]\n'
for wast in sorted(os.listdir('test')):
  if wast.endswith('.wast') and not wast in []: # blacklist some known failures
    for debug_info in [0, 1]:
      cmd = [os.path.join('bin', 'wasm-as'), os.path.join('test', wast), '-o', 'a.wasm']
      if debug_info: cmd += ['-g']
      print ' '.join(cmd)
      if os.path.exists('a.wasm'): os.unlink('a.wasm')
      subprocess.check_call(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
      assert os.path.exists('a.wasm')
      cmd = [os.path.join('bin', 'wasm-dis'), 'a.wasm', '-o', 'a.wast']
      print ' '.join(cmd)
      if os.path.exists('a.wast'): os.unlink('a.wast')
      subprocess.check_call(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
      assert os.path.exists('a.wast')
      actual = open('a.wast').read()
      binary_name = wast + '.fromBinary'
      if not debug_info: binary_name += '.noDebugInfo'
      with open(os.path.join('test', binary_name), 'w') as o: o.write(actual)
print '\n[ checking example testcases... ]\n'
for t in sorted(os.listdir(os.path.join('test', 'example'))):
output_file = os.path.join('bin', 'example')
cmd = ['-Isrc', '-g', '-lasmjs', '-lsupport', '-Llib/.', '-pthread', '-o', output_file]
if t.endswith('.txt'):
# check if there is a trace in the file, if so, we should build it
out = subprocess.Popen([os.path.join('scripts', 'clean_c_api_trace.py'), os.path.join('test', 'example', t)], stdout=subprocess.PIPE).communicate()[0]
if len(out) == 0:
print ' (no trace in ', t, ')'
continue
print ' (will check trace in ', t, ')'
src = 'trace.cpp'
with open(src, 'w') as o: o.write(out)
expected = os.path.join('test', 'example', t + '.txt')
else:
src = os.path.join('test', 'example', t)
expected = os.path.join('test', 'example', '.'.join(t.split('.')[:-1]) + '.txt')
if src.endswith(('.c', '.cpp')):
# build the C file separately
extra = [os.environ.get('CC') or 'gcc',
src, '-c', '-o', 'example.o',
'-Isrc', '-g', '-Llib/.', '-pthread']
print 'build: ', ' '.join(extra)
subprocess.check_call(extra)
# Link against the binaryen C library DSO, using an executable-relative rpath
cmd = ['example.o', '-lbinaryen'] + cmd + ['-Wl,-rpath=$ORIGIN/../lib']
else:
continue
print ' ', t, src, expected
if os.environ.get('COMPILER_FLAGS'):
for f in os.environ.get('COMPILER_FLAGS').split(' '):
cmd.append(f)
cmd = [os.environ.get('CXX') or 'g++', '-std=c++11'] + cmd
try:
print 'link: ', ' '.join(cmd)
subprocess.check_call(cmd)
print 'run...', output_file
proc = subprocess.Popen([output_file], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
actual, err = proc.communicate()
assert proc.returncode == 0, [proc.returncode, actual, err]
with open(expected, 'w') as o: o.write(actual)
finally:
os.remove(output_file)
if sys.platform == 'darwin':
# Also removes debug directory produced on Mac OS
shutil.rmtree(output_file + '.dSYM')
# Regenerate wasm-opt round-trip expectations for the top-level .wast tests.
print '\n[ checking wasm-opt testcases... ]\n'
for t in os.listdir('test'):
  if t.endswith('.wast') and not t.startswith('spec'):
    print '..', t
    t = os.path.join('test', t)
    f = t + '.from-wast'
    cmd = [os.path.join('bin', 'wasm-opt'), t, '--print']
    actual = run_command(cmd)
    actual = actual.replace('printing before:\n', '')
    open(f, 'w').write(actual)
# Regenerate disassembly expectations for checked-in binary modules.
print '\n[ checking wasm-dis on provided binaries... ]\n'
for t in os.listdir('test'):
  if t.endswith('.wasm') and not t.startswith('spec'):
    print '..', t
    t = os.path.join('test', t)
    cmd = [os.path.join('bin', 'wasm-dis'), t]
    actual = run_command(cmd)
    open(t + '.fromBinary', 'w').write(actual)
# Regenerate wasm-merge expectations, crossing finalize x optimize modes.
print '\n[ checking wasm-merge... ]\n'
for t in os.listdir(os.path.join('test', 'merge')):
  if t.endswith(('.wast', '.wasm')):
    print '..', t
    t = os.path.join('test', 'merge', t)
    u = t + '.toMerge'
    for finalize in [0, 1]:
      for opt in [0, 1]:
        cmd = [os.path.join('bin', 'wasm-merge'), t, u, '-o', 'a.wast', '-S', '--verbose']
        if finalize: cmd += ['--finalize-memory-base=1024', '--finalize-table-base=8']
        if opt: cmd += ['-O']
        stdout = run_command(cmd)
        actual = open('a.wast').read()
        out = t + '.combined'
        if finalize: out += '.finalized'
        if opt: out += '.opt'
        with open(out, 'w') as o: o.write(actual)
        with open(out + '.stdout', 'w') as o: o.write(stdout)
# Regenerate binaryen.js expectations by concatenating the library with each
# test driver and running it under the mozjs shell.
print '\n[ checking binaryen.js testcases... ]\n'
for s in sorted(os.listdir(os.path.join('test', 'binaryen.js'))):
  if not s.endswith('.js'): continue
  print s
  f = open('a.js', 'w')
  f.write(open(os.path.join('bin', 'binaryen.js')).read())
  f.write(open(os.path.join('test', 'binaryen.js', s)).read())
  f.close()
  cmd = ['mozjs', 'a.js']
  out = run_command(cmd, stderr=subprocess.STDOUT)
  open(os.path.join('test', 'binaryen.js', s + '.txt'), 'w').write(out)
print '\n[ success! ]'
|
{
"content_hash": "b3a77666efa9d53378a791ca977a2966",
"timestamp": "",
"source": "github",
"line_count": 227,
"max_line_length": 154,
"avg_line_length": 40.45374449339207,
"alnum_prop": 0.576608951323097,
"repo_name": "yurydelendik/binaryen",
"id": "76cd02cbbb3fd11935a081e18d2b960d29551315",
"size": "9206",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "auto_update_tests.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "5614793"
},
{
"name": "C",
"bytes": "74254"
},
{
"name": "C++",
"bytes": "1750409"
},
{
"name": "CMake",
"bytes": "10715"
},
{
"name": "JavaScript",
"bytes": "469918"
},
{
"name": "Python",
"bytes": "83994"
},
{
"name": "Shell",
"bytes": "9035"
}
],
"symlink_target": ""
}
|
from threading import Thread
import subprocess
from time import sleep
import signal
import time
from watchdog.observers import Observer
from watchdog.events import FileSystemEventHandler
import os
import sys
# Paths to the roslaunch config, taken from the environment set by the
# surrounding system setup.
g_RD_Robot_Roslaunch_Config_Dir = os.environ['RD_SYSTEM_CONFIG_DIR']
g_RD_Robot_Roslaunch_Config_File = os.environ['RD_ROBOT_ROSLAUNCH_CONFIG_FILE']
g_RD_Robot_Roslaunch_Config_File_FullPath = os.environ['RD_ROBOT_ROSLAUNCH_CONFIG_FILE_FULLPATH']
# Handle of the currently running roslaunch subprocess (None until started).
g_roslaunch_Process = None
# Maps signal numbers back to their SIG* names, for readable logging.
g_SIGNALS_TO_NAMES_DICT = dict((getattr(signal, n), n) \
    for n in dir(signal) if n.startswith('SIG') and '_' not in n )
def ternimateg_roslaunch():
    """Terminate the tracked roslaunch subprocess if it is still running."""
    global g_roslaunch_Process
    if g_roslaunch_Process is not None:
        # only terminate when poll() return None, which means it's still running
        if g_roslaunch_Process.poll() is None:
            print('terminate roslaunch process')
            g_roslaunch_Process.terminate()
def signal_handler(signal, frame):
    """On SIGTERM/SIGINT: kill the roslaunch child, log the signal, exit.

    Note: the parameter name `signal` shadows the `signal` module inside
    this function body.
    """
    global g_SIGNALS_TO_NAMES_DICT
    ternimateg_roslaunch()
    print('####Robot_Auto_Roslaunch signal recived: ', g_SIGNALS_TO_NAMES_DICT[signal])
    sys.exit(0)
# Ensure the roslaunch child is cleaned up when this wrapper is stopped.
signal.signal(signal.SIGTERM, signal_handler)
signal.signal(signal.SIGINT, signal_handler)
def readConfigFile():
    """Read the roslaunch arguments from the config file.

    Returns the first line of the file split on spaces, e.g.
    ["my_pkg", "demo.launch"]. Raises IndexError if the file is empty.
    """
    global g_RD_Robot_Roslaunch_Config_File_FullPath
    lines = None
    with open(g_RD_Robot_Roslaunch_Config_File_FullPath) as f:
        lines = f.readlines()[0].replace('\n', '').split(' ')
    print('####Reading config from ', g_RD_Robot_Roslaunch_Config_File_FullPath, ', \ncontent: ', lines)
    return lines
def lanuchRoslaunch():
    """(Re)start roslaunch with the arguments from the config file.

    Terminates any previous roslaunch child first, then spawns
    "roslaunch <args...>" and stores its handle in g_roslaunch_Process.
    """
    global g_roslaunch_Process
    ternimateg_roslaunch()
    launchFileName = readConfigFile()
    args = ["roslaunch"]
    args.extend(launchFileName)
    print('####launching subprocess with command line = ', args)
    g_roslaunch_Process = subprocess.Popen(args)
def tryLanuchRoslaunch():
    """Start roslaunch if it is not running; restart it if it has exited."""
    global g_roslaunch_Process
    if g_roslaunch_Process is None:
        print('####launch roslaunch from None')
        lanuchRoslaunch()
    # g_roslaunch_Process.poll() return None if subprocess is still alive
    elif g_roslaunch_Process.poll() is not None:
        print('####restarting roslaunch')
        lanuchRoslaunch()
class MyFileChangeHandler(FileSystemEventHandler):
    """Watchdog handler: restart roslaunch when the config file changes."""
    def on_modified(self, event):
        global g_roslaunch_Process
        global g_RD_Robot_Roslaunch_Config_File_FullPath
        print("####file changed detected: ", event.src_path, event.event_type) # print now only for degug
        # Only react to the specific config file, not other files in the dir.
        if event.src_path == g_RD_Robot_Roslaunch_Config_File_FullPath:
            print('#######on launch config change roslaunch')
            lanuchRoslaunch()
if __name__ == "__main__":
    # Watch the config directory and keep a roslaunch child alive, restarting
    # it on config changes or when it exits.
    event_handler = MyFileChangeHandler()
    observer = Observer()
    print('####config path = ', g_RD_Robot_Roslaunch_Config_Dir)
    observer.schedule(event_handler, path=g_RD_Robot_Roslaunch_Config_Dir, recursive=False)
    tryLanuchRoslaunch()
    observer.start()
    try:
        while True:
            # Poll every 3 seconds so a crashed roslaunch gets restarted.
            time.sleep(3)
            # print('### poll value: ', g_roslaunch_Process.poll())
            tryLanuchRoslaunch()
    except KeyboardInterrupt:
        observer.stop()
    observer.join()
    ternimateg_roslaunch()
    # process_1 = subprocess.Popen(["roslaunch", "abb_irb6400_moveit_confignonEndEffector", "demolite.launch"])
    # sleep(5)
    # print("thread status-------", thread.is_alive())
    # process_1.terminate()
    print("finisheddddddddddddd...exiting")
|
{
"content_hash": "ad0672d2c38a8a47dfd5b7c30a958e66",
"timestamp": "",
"source": "github",
"line_count": 98,
"max_line_length": 111,
"avg_line_length": 35.54081632653061,
"alnum_prop": 0.6821705426356589,
"repo_name": "cuixiongyi/setup_workspace",
"id": "f530757f1c8accbc1d5d7bac43720bfaf43af253",
"size": "3731",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dev/autoStartRobotRoslaunchFile.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1018"
},
{
"name": "Python",
"bytes": "3731"
},
{
"name": "Shell",
"bytes": "38822"
}
],
"symlink_target": ""
}
|
import math
from ransac import Ransac, Result
class Line:
    """A detected line: fitted model parameters plus extent and a score."""

    def __init__(self, params, x_range, y_range, error):
        """Store the fitted line.

        params  -- model parameters of the fitted line
        x_range -- extent of the supporting points along x
        y_range -- extent of the supporting points along y
        error   -- quality score of the fit
        """
        self.params, self.x_range, self.y_range, self.error = (
            params, x_range, y_range, error)
# Shared RANSAC instance used by detectLines.
# Ransac: iterations, threshDist, minInlier, threshAngle
ransac = Ransac(20, 50, 10, math.radians(5))
def detectLines(edgelFrame):
    """Repeatedly fit lines to *edgelFrame* with RANSAC.

    Each round fits one line, records it, and continues on the leftover
    outliers until no further fit is found. Returns a list of Line objects.
    """
    detected = []
    remaining = edgelFrame
    while True:
        fit = ransac.getOutlier(remaining)
        if not fit:
            return detected
        # Map the accumulated fit error to a score in (0, 1]; lower
        # per-inlier error gives a score closer to 1.
        score = math.pow(math.e, -0.1 * fit.error / len(fit.inlier))
        detected.append(Line(fit.getLine(), fit.x_range, fit.y_range, score))
        remaining = fit.outlier
|
{
"content_hash": "2f70b611839477e47c9572327ed36667",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 76,
"avg_line_length": 21.285714285714285,
"alnum_prop": 0.6134228187919463,
"repo_name": "BerlinUnited/NaoTH",
"id": "2487675297d3e82744cc52dda72f2e20b45562da",
"size": "745",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "Utils/py/lineDetection/line_detector.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "319"
},
{
"name": "C",
"bytes": "16295"
},
{
"name": "C++",
"bytes": "3831321"
},
{
"name": "CSS",
"bytes": "8839"
},
{
"name": "HTML",
"bytes": "21148"
},
{
"name": "Java",
"bytes": "1816793"
},
{
"name": "Jupyter Notebook",
"bytes": "8092"
},
{
"name": "Lua",
"bytes": "73794"
},
{
"name": "MATLAB",
"bytes": "141780"
},
{
"name": "Python",
"bytes": "1337382"
},
{
"name": "Shell",
"bytes": "60599"
}
],
"symlink_target": ""
}
|
"""
Splunk Search State Module
.. versionadded:: 2015.5.0
This state is used to ensure presence of splunk searches.
.. code-block:: yaml
server-warning-message:
splunk_search.present:
- name: This is the splunk search name
- search: index=main sourcetype=
"""
def __virtual__():
    """Only load if the splunk_search module is available in __salt__."""
    if "splunk_search.get" not in __salt__:
        # Execution module missing: report why this state cannot load.
        return (False, "splunk module could not be loaded")
    return "splunk_search"
def present(name, profile="splunk", **kwargs):
    """
    Ensure a search is present
    .. code-block:: yaml
        API Error Search:
          splunk_search.present:
            search: index=main sourcetype=blah
            template: alert_5min
    The following parameters are required:
    name
        This is the name of the search in splunk
    """
    # Standard Salt state return dict; "result" stays None in test mode.
    ret = {"name": name, "changes": {}, "result": None, "comment": ""}
    target = __salt__["splunk_search.get"](name, profile=profile)
    if target:
        if __opts__["test"]:
            ret["comment"] = "Would update {}".format(name)
            return ret
        # found a search... updating
        result = __salt__["splunk_search.update"](name, profile=profile, **kwargs)
        if not result:
            # no update
            ret["result"] = True
            ret["comment"] = "No changes"
        else:
            # update() returned (new values, textual diff); record the old
            # values for exactly the keys that changed.
            (newvalues, diffs) = result
            old_content = dict(target.content)
            old_changes = {}
            for x in newvalues:
                old_changes[x] = old_content.get(x, None)
            ret["result"] = True
            ret["changes"]["diff"] = diffs
            ret["changes"]["old"] = old_changes
            ret["changes"]["new"] = newvalues
    else:
        if __opts__["test"]:
            ret["comment"] = "Would create {}".format(name)
            return ret
        # creating a new search
        result = __salt__["splunk_search.create"](name, profile=profile, **kwargs)
        if result:
            ret["result"] = True
            ret["changes"]["old"] = False
            ret["changes"]["new"] = kwargs
        else:
            ret["result"] = False
            ret["comment"] = "Failed to create {}".format(name)
    return ret
def absent(name, profile="splunk"):
    """
    Ensure a search is absent
    .. code-block:: yaml
        API Error Search:
          splunk_search.absent
    The following parameters are required:
    name
        This is the name of the search in splunk
    """
    ret = {
        "name": name,
        "changes": {},
        "result": True,
        "comment": "{} is absent.".format(name),
    }
    target = __salt__["splunk_search.get"](name, profile=profile)
    if target:
        if __opts__["test"]:
            # NOTE(review): rebuilding ret from scratch drops the "changes"
            # key present in the other return paths -- presumably
            # unintentional; confirm against the Salt state contract.
            ret = {}
            ret["name"] = name
            ret["comment"] = "Would delete {}".format(name)
            ret["result"] = None
            return ret
        result = __salt__["splunk_search.delete"](name, profile=profile)
        if result:
            ret["comment"] = "{} was deleted".format(name)
        else:
            ret["comment"] = "Failed to delete {}".format(name)
            ret["result"] = False
    return ret
|
{
"content_hash": "f4d8a6609b12cbde368f61540f077163",
"timestamp": "",
"source": "github",
"line_count": 117,
"max_line_length": 82,
"avg_line_length": 27.76923076923077,
"alnum_prop": 0.5290858725761773,
"repo_name": "saltstack/salt",
"id": "a12ca481e295567542ca52d249f221673d144f27",
"size": "3249",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "salt/states/splunk_search.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "14911"
},
{
"name": "C",
"bytes": "1571"
},
{
"name": "Cython",
"bytes": "1458"
},
{
"name": "Dockerfile",
"bytes": "184"
},
{
"name": "Groovy",
"bytes": "12318"
},
{
"name": "HCL",
"bytes": "257"
},
{
"name": "HTML",
"bytes": "8031"
},
{
"name": "Jinja",
"bytes": "45598"
},
{
"name": "Makefile",
"bytes": "713"
},
{
"name": "NSIS",
"bytes": "76572"
},
{
"name": "PowerShell",
"bytes": "75891"
},
{
"name": "Python",
"bytes": "41444811"
},
{
"name": "Rich Text Format",
"bytes": "6242"
},
{
"name": "Roff",
"bytes": "191"
},
{
"name": "Ruby",
"bytes": "961"
},
{
"name": "SaltStack",
"bytes": "35856"
},
{
"name": "Scheme",
"bytes": "895"
},
{
"name": "Scilab",
"bytes": "1147"
},
{
"name": "Shell",
"bytes": "524917"
}
],
"symlink_target": ""
}
|
"""List missing tracks.
"""
from beets.autotag import hooks
from beets.library import Item
from beets.plugins import BeetsPlugin
from beets.ui import decargs, print_obj, Subcommand
def _missing_count(album):
"""Return number of missing items in `album`.
"""
return (album.tracktotal or 0) - len(album.items())
def _item(track_info, album_info, album_id):
    """Build a library `Item` for a track known only from MusicBrainz data.

    Fields that can only be derived from an audio file on disk (encoder,
    rg_track_gain, rg_track_peak, rg_album_gain, rg_album_peak,
    original_year, original_month, original_day, length, bitrate, format,
    samplerate, bitdepth, channels, mtime) are left unset.
    """
    return Item(**{
        'album_id': album_id,
        'album': album_info.album,
        'albumartist': album_info.artist,
        'albumartist_credit': album_info.artist_credit,
        'albumartist_sort': album_info.artist_sort,
        'albumdisambig': album_info.albumdisambig,
        'albumstatus': album_info.albumstatus,
        'albumtype': album_info.albumtype,
        'artist': track_info.artist,
        'artist_credit': track_info.artist_credit,
        'artist_sort': track_info.artist_sort,
        'asin': album_info.asin,
        'catalognum': album_info.catalognum,
        'comp': album_info.va,
        'country': album_info.country,
        'day': album_info.day,
        'disc': track_info.medium,
        'disctitle': track_info.disctitle,
        'disctotal': album_info.mediums,
        'label': album_info.label,
        'language': album_info.language,
        'length': track_info.length,
        'mb_albumid': album_info.album_id,
        'mb_artistid': track_info.artist_id,
        'mb_releasegroupid': album_info.releasegroup_id,
        'mb_trackid': track_info.track_id,
        'media': track_info.media,
        'month': album_info.month,
        'script': album_info.script,
        'title': track_info.title,
        'track': track_info.index,
        'tracktotal': len(album_info.tracks),
        'year': album_info.year,
    })
class MissingPlugin(BeetsPlugin):
    """List missing tracks.

    Registers the `missing` (alias `miss`) CLI command and a `$missing`
    album template field holding the per-album missing-track count.
    """
    def __init__(self):
        super(MissingPlugin, self).__init__()
        self.config.add({
            'format': None,
            'count': False,
            'total': False,
        })
        self.album_template_fields['missing'] = _missing_count
        self._command = Subcommand('missing',
                                   help=__doc__,
                                   aliases=['miss'])
        self._command.parser.add_option('-f', '--format', dest='format',
                                        action='store', type='string',
                                        help='print with custom FORMAT',
                                        metavar='FORMAT')
        self._command.parser.add_option('-c', '--count', dest='count',
                                        action='store_true',
                                        help='count missing tracks per album')
        self._command.parser.add_option('-t', '--total', dest='total',
                                        action='store_true',
                                        help='count total of missing tracks')
    def commands(self):
        """Expose the `missing` subcommand to the beets CLI."""
        def _miss(lib, opts, args):
            # Command-line flags override the configuration.
            self.config.set_args(opts)
            fmt = self.config['format'].get()
            count = self.config['count'].get()
            total = self.config['total'].get()
            albums = lib.albums(decargs(args))
            if total:
                # Just one number: missing tracks summed over all albums.
                print(sum(_missing_count(a) for a in albums))
                return
            # Default format string for count mode.
            if count and not fmt:
                fmt = '$albumartist - $album: $missing'
            for album in albums:
                if count:
                    missing = _missing_count(album)
                    if missing:
                        print_obj(album, lib, fmt=fmt)
                else:
                    for item in self._missing(album):
                        print_obj(item, lib, fmt=fmt)
        self._command.func = _miss
        return [self._command]
    def _missing(self, album):
        """Query MusicBrainz to determine items missing from `album`.

        Yields skeleton `Item` objects (see `_item`) for each track of the
        MusicBrainz release with no matching item in the library.
        """
        # Use a set: O(1) membership tests, and unlike a bare `map` object
        # it is not exhausted after the first lookup on Python 3.
        item_mbids = set(x.mb_trackid for x in album.items())
        if len([i for i in album.items()]) < album.tracktotal:
            # fetch missing items
            # TODO: Implement caching that without breaking other stuff
            album_info = hooks.album_for_mbid(album.mb_albumid)
            for track_info in getattr(album_info, 'tracks', []):
                if track_info.track_id not in item_mbids:
                    item = _item(track_info, album_info, album.id)
                    # str.format-style lazy log args are zero-indexed; the
                    # previous '{1}'/'{2}' raised IndexError with two args.
                    self._log.debug(u'track {0} in album {1}',
                                    track_info.track_id, album_info.album_id)
                    yield item
|
{
"content_hash": "c5ee0a94f69de24672b0ac1a27840d36",
"timestamp": "",
"source": "github",
"line_count": 137,
"max_line_length": 78,
"avg_line_length": 36.87591240875913,
"alnum_prop": 0.47961203483768805,
"repo_name": "andremiller/beets",
"id": "a27be65d1967c35750f76d38689026fdf609c78c",
"size": "5696",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "beetsplug/missing.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2951"
},
{
"name": "JavaScript",
"bytes": "86414"
},
{
"name": "Makefile",
"bytes": "5073"
},
{
"name": "Python",
"bytes": "1368623"
},
{
"name": "Shell",
"bytes": "7413"
}
],
"symlink_target": ""
}
|
import os
import urlparse
import mock
from oslo.config import cfg
import webob
from webob import exc
import webtest
from neutron.api import api_common
from neutron.api.extensions import PluginAwareExtensionManager
from neutron.api.v2 import attributes
from neutron.api.v2 import base as v2_base
from neutron.api.v2 import router
from neutron.common import config
from neutron.common import exceptions as q_exc
from neutron import context
from neutron.manager import NeutronManager
from neutron.openstack.common.notifier import api as notifer_api
from neutron.openstack.common import policy as common_policy
from neutron.openstack.common import uuidutils
from neutron.tests import base
from neutron.tests.unit import testlib_api
ROOTDIR = os.path.dirname(os.path.dirname(__file__))
ETCDIR = os.path.join(ROOTDIR, 'etc')
EXTDIR = os.path.join(ROOTDIR, 'unit/extensions')
_uuid = uuidutils.generate_uuid
def etcdir(*p):
    """Return a path for *p* joined under the test ``etc`` directory."""
    segments = (ETCDIR,) + tuple(p)
    return os.path.join(*segments)
def _get_path(resource, id=None, action=None, fmt=None):
path = '/%s' % resource
if id is not None:
path = path + '/%s' % id
if action is not None:
path = path + '/%s' % action
if fmt is not None:
path = path + '.%s' % fmt
return path
class ResourceIndexTestCase(base.BaseTestCase):
    """Sanity checks for the JSON index listing of API v2 resources."""

    def test_index_json(self):
        app = webtest.TestApp(router.Index({'foo': 'bar'}))
        response = app.get('')

        # Exactly one resource entry is advertised.
        self.assertIn('resources', response.json)
        resources = response.json['resources']
        self.assertEqual(len(resources), 1)

        entry = resources[0]
        self.assertIn('collection', entry)
        self.assertEqual(entry['collection'], 'bar')
        self.assertIn('name', entry)
        self.assertEqual(entry['name'], 'foo')

        # The entry carries a single self-link pointing at the collection.
        self.assertIn('links', entry)
        self.assertEqual(len(entry['links']), 1)
        link = entry['links'][0]
        self.assertIn('href', link)
        self.assertEqual(link['href'], 'http://localhost/bar')
        self.assertIn('rel', link)
        self.assertEqual(link['rel'], 'self')
class APIv2TestBase(base.BaseTestCase):
    """Common fixture: a v2 API router backed by a fully mocked core plugin.

    Subclasses get ``self.plugin`` (the autospec'd plugin class mock) and
    ``self.api`` (a webtest app wrapping a fresh APIRouter).
    """

    def setUp(self):
        super(APIv2TestBase, self).setUp()
        plugin = 'neutron.neutron_plugin_base_v2.NeutronPluginBaseV2'
        # Ensure 'stale' patched copies of the plugin are never returned
        NeutronManager._instance = None
        # Ensure existing ExtensionManager is not used
        PluginAwareExtensionManager._instance = None
        # Create the default configurations
        args = ['--config-file', etcdir('neutron.conf.test')]
        config.parse(args=args)
        # Update the plugin
        cfg.CONF.set_override('core_plugin', plugin)
        cfg.CONF.set_override('allow_pagination', True)
        cfg.CONF.set_override('allow_sorting', True)
        # Patch must happen before APIRouter() below so the router binds
        # to the mock rather than the real plugin class.
        self._plugin_patcher = mock.patch(plugin, autospec=True)
        self.plugin = self._plugin_patcher.start()
        instance = self.plugin.return_value
        # Advertise native pagination/sorting support on the mock; the
        # attribute names are the name-mangled private flags the API
        # layer inspects on the plugin instance.
        instance._NeutronPluginBaseV2__native_pagination_support = True
        instance._NeutronPluginBaseV2__native_sorting_support = True
        self.addCleanup(self._plugin_patcher.stop)
        self.addCleanup(cfg.CONF.reset)
        api = router.APIRouter()
        self.api = webtest.TestApp(api)
class _ArgMatcher(object):
"""An adapter to assist mock assertions, used to custom compare."""
def __init__(self, cmp, obj):
self.cmp = cmp
self.obj = obj
def __eq__(self, other):
return self.cmp(self.obj, other)
def _list_cmp(l1, l2):
return set(l1) == set(l2)
class APIv2TestCase(APIv2TestBase):
    """Exercise query-string handling (fields, filters, pagination,
    sorting) of the v2 API against the mocked core plugin.
    """

    def _do_field_list(self, resource, base_fields):
        """Return *base_fields* plus the attributes the API always adds
        (policy-required and primary-key fields) for *resource*."""
        attr_info = attributes.RESOURCE_ATTRIBUTE_MAP[resource]
        policy_attrs = [name for (name, info) in attr_info.items()
                        if info.get('required_by_policy') or
                        info.get('primary_key')]
        # BUG FIX: copy instead of aliasing so the caller's list is not
        # mutated in place by the extend() below.
        fields = list(base_fields)
        fields.extend(policy_attrs)
        return fields

    def _get_collection_kwargs(self, skipargs=(), **kwargs):
        """Build the expected kwargs of a plugin get_<collection> call.

        Every standard collection argument defaults to mock.ANY unless
        listed in *skipargs*; explicit values come from **kwargs.
        """
        # BUG FIX: default was a mutable list ([]); use an immutable
        # tuple to avoid the shared-default pitfall.
        args_list = ['filters', 'fields', 'sorts', 'limit', 'marker',
                     'page_reverse']
        args_dict = dict((arg, mock.ANY)
                         for arg in set(args_list) - set(skipargs))
        args_dict.update(kwargs)
        return args_dict

    def test_fields(self):
        instance = self.plugin.return_value
        instance.get_networks.return_value = []
        self.api.get(_get_path('networks'), {'fields': 'foo'})
        fields = self._do_field_list('networks', ['foo'])
        kwargs = self._get_collection_kwargs(fields=fields)
        instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)

    def test_fields_multiple(self):
        instance = self.plugin.return_value
        instance.get_networks.return_value = []
        fields = self._do_field_list('networks', ['foo', 'bar'])
        self.api.get(_get_path('networks'), {'fields': ['foo', 'bar']})
        kwargs = self._get_collection_kwargs(fields=fields)
        instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)

    def test_fields_multiple_with_empty(self):
        # Empty field entries are dropped.
        instance = self.plugin.return_value
        instance.get_networks.return_value = []
        fields = self._do_field_list('networks', ['foo'])
        self.api.get(_get_path('networks'), {'fields': ['foo', '']})
        kwargs = self._get_collection_kwargs(fields=fields)
        instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)

    def test_fields_empty(self):
        instance = self.plugin.return_value
        instance.get_networks.return_value = []
        self.api.get(_get_path('networks'), {'fields': ''})
        kwargs = self._get_collection_kwargs(fields=[])
        instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)

    def test_fields_multiple_empty(self):
        instance = self.plugin.return_value
        instance.get_networks.return_value = []
        self.api.get(_get_path('networks'), {'fields': ['', '']})
        kwargs = self._get_collection_kwargs(fields=[])
        instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)

    def test_filters(self):
        instance = self.plugin.return_value
        instance.get_networks.return_value = []
        self.api.get(_get_path('networks'), {'name': 'bar'})
        filters = {'name': ['bar']}
        kwargs = self._get_collection_kwargs(filters=filters)
        instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)

    def test_filters_empty(self):
        instance = self.plugin.return_value
        instance.get_networks.return_value = []
        self.api.get(_get_path('networks'), {'name': ''})
        filters = {}
        kwargs = self._get_collection_kwargs(filters=filters)
        instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)

    def test_filters_multiple_empty(self):
        instance = self.plugin.return_value
        instance.get_networks.return_value = []
        self.api.get(_get_path('networks'), {'name': ['', '']})
        filters = {}
        kwargs = self._get_collection_kwargs(filters=filters)
        instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)

    def test_filters_multiple_with_empty(self):
        instance = self.plugin.return_value
        instance.get_networks.return_value = []
        self.api.get(_get_path('networks'), {'name': ['bar', '']})
        filters = {'name': ['bar']}
        kwargs = self._get_collection_kwargs(filters=filters)
        instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)

    def test_filters_multiple_values(self):
        instance = self.plugin.return_value
        instance.get_networks.return_value = []
        self.api.get(_get_path('networks'), {'name': ['bar', 'bar2']})
        filters = {'name': ['bar', 'bar2']}
        kwargs = self._get_collection_kwargs(filters=filters)
        instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)

    def test_filters_multiple(self):
        instance = self.plugin.return_value
        instance.get_networks.return_value = []
        self.api.get(_get_path('networks'), {'name': 'bar',
                                             'tenant_id': 'bar2'})
        filters = {'name': ['bar'], 'tenant_id': ['bar2']}
        kwargs = self._get_collection_kwargs(filters=filters)
        instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)

    def test_filters_with_fields(self):
        instance = self.plugin.return_value
        instance.get_networks.return_value = []
        self.api.get(_get_path('networks'), {'name': 'bar', 'fields': 'foo'})
        filters = {'name': ['bar']}
        fields = self._do_field_list('networks', ['foo'])
        kwargs = self._get_collection_kwargs(filters=filters, fields=fields)
        instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)

    def test_filters_with_convert_to(self):
        # String filter values pass through the attribute's convert_to.
        instance = self.plugin.return_value
        instance.get_ports.return_value = []
        self.api.get(_get_path('ports'), {'admin_state_up': 'true'})
        filters = {'admin_state_up': [True]}
        kwargs = self._get_collection_kwargs(filters=filters)
        instance.get_ports.assert_called_once_with(mock.ANY, **kwargs)

    def test_filters_with_convert_list_to(self):
        instance = self.plugin.return_value
        instance.get_ports.return_value = []
        self.api.get(_get_path('ports'),
                     {'fixed_ips': ['ip_address=foo', 'subnet_id=bar']})
        filters = {'fixed_ips': {'ip_address': ['foo'], 'subnet_id': ['bar']}}
        kwargs = self._get_collection_kwargs(filters=filters)
        instance.get_ports.assert_called_once_with(mock.ANY, **kwargs)

    def test_limit(self):
        instance = self.plugin.return_value
        instance.get_networks.return_value = []
        self.api.get(_get_path('networks'),
                     {'limit': '10'})
        kwargs = self._get_collection_kwargs(limit=10)
        instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)

    def test_limit_with_great_than_max_limit(self):
        # Requested limits are capped at pagination_max_limit.
        cfg.CONF.set_default('pagination_max_limit', '1000')
        instance = self.plugin.return_value
        instance.get_networks.return_value = []
        self.api.get(_get_path('networks'),
                     {'limit': '1001'})
        kwargs = self._get_collection_kwargs(limit=1000)
        instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)

    def test_limit_with_zero(self):
        # limit=0 means "no explicit limit" and falls back to the max.
        cfg.CONF.set_default('pagination_max_limit', '1000')
        instance = self.plugin.return_value
        instance.get_networks.return_value = []
        self.api.get(_get_path('networks'), {'limit': '0'})
        kwargs = self._get_collection_kwargs(limit=1000)
        instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)

    def test_limit_with_unspecific(self):
        cfg.CONF.set_default('pagination_max_limit', '1000')
        instance = self.plugin.return_value
        instance.get_networks.return_value = []
        self.api.get(_get_path('networks'))
        kwargs = self._get_collection_kwargs(limit=1000)
        instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)

    def test_limit_with_negative_value(self):
        cfg.CONF.set_default('pagination_max_limit', '1000')
        instance = self.plugin.return_value
        instance.get_networks.return_value = []
        res = self.api.get(_get_path('networks'), {'limit': -1},
                           expect_errors=True)
        self.assertEqual(res.status_int, exc.HTTPBadRequest.code)

    def test_limit_with_non_integer(self):
        instance = self.plugin.return_value
        instance.get_networks.return_value = []
        res = self.api.get(_get_path('networks'),
                           {'limit': 'abc'}, expect_errors=True)
        self.assertEqual(res.status_int, exc.HTTPBadRequest.code)

    def test_limit_with_infinite_pagination_max_limit(self):
        instance = self.plugin.return_value
        instance.get_networks.return_value = []
        cfg.CONF.set_override('pagination_max_limit', 'Infinite')
        self.api.get(_get_path('networks'))
        kwargs = self._get_collection_kwargs(limit=None)
        instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)

    def test_limit_with_negative_pagination_max_limit(self):
        # Invalid max-limit configuration disables the cap entirely.
        instance = self.plugin.return_value
        instance.get_networks.return_value = []
        cfg.CONF.set_default('pagination_max_limit', '-1')
        self.api.get(_get_path('networks'))
        kwargs = self._get_collection_kwargs(limit=None)
        instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)

    def test_limit_with_non_integer_pagination_max_limit(self):
        instance = self.plugin.return_value
        instance.get_networks.return_value = []
        cfg.CONF.set_default('pagination_max_limit', 'abc')
        self.api.get(_get_path('networks'))
        kwargs = self._get_collection_kwargs(limit=None)
        instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)

    def test_marker(self):
        cfg.CONF.set_override('pagination_max_limit', '1000')
        instance = self.plugin.return_value
        instance.get_networks.return_value = []
        marker = _uuid()
        self.api.get(_get_path('networks'),
                     {'marker': marker})
        kwargs = self._get_collection_kwargs(limit=1000, marker=marker)
        instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)

    def test_page_reverse(self):
        calls = []
        instance = self.plugin.return_value
        instance.get_networks.return_value = []
        self.api.get(_get_path('networks'),
                     {'page_reverse': 'True'})
        kwargs = self._get_collection_kwargs(page_reverse=True)
        calls.append(mock.call.get_networks(mock.ANY, **kwargs))
        instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)

        instance = self.plugin.return_value
        instance.get_networks.return_value = []
        self.api.get(_get_path('networks'),
                     {'page_reverse': 'False'})
        kwargs = self._get_collection_kwargs(page_reverse=False)
        calls.append(mock.call.get_networks(mock.ANY, **kwargs))
        # BUG FIX: the expected call for page_reverse=False was collected
        # but never asserted, so a regression in handling the False case
        # would have gone unnoticed.
        instance.assert_has_calls(calls)

    def test_page_reverse_with_non_bool(self):
        # Unparsable values fall back to the default (False).
        instance = self.plugin.return_value
        instance.get_networks.return_value = []
        self.api.get(_get_path('networks'),
                     {'page_reverse': 'abc'})
        kwargs = self._get_collection_kwargs(page_reverse=False)
        instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)

    def test_page_reverse_with_unspecific(self):
        instance = self.plugin.return_value
        instance.get_networks.return_value = []
        self.api.get(_get_path('networks'))
        kwargs = self._get_collection_kwargs(page_reverse=False)
        instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)

    def test_sort(self):
        # The primary key ('id', ascending) is always appended as a
        # tie-breaker sort key.
        instance = self.plugin.return_value
        instance.get_networks.return_value = []
        self.api.get(_get_path('networks'),
                     {'sort_key': ['name', 'admin_state_up'],
                      'sort_dir': ['desc', 'asc']})
        kwargs = self._get_collection_kwargs(sorts=[('name', False),
                                                    ('admin_state_up', True),
                                                    ('id', True)])
        instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)

    def test_sort_with_primary_key(self):
        # An explicit 'id' sort key keeps its requested direction.
        instance = self.plugin.return_value
        instance.get_networks.return_value = []
        self.api.get(_get_path('networks'),
                     {'sort_key': ['name', 'admin_state_up', 'id'],
                      'sort_dir': ['desc', 'asc', 'desc']})
        kwargs = self._get_collection_kwargs(sorts=[('name', False),
                                                    ('admin_state_up', True),
                                                    ('id', False)])
        instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)

    def test_sort_without_direction(self):
        instance = self.plugin.return_value
        instance.get_networks.return_value = []
        res = self.api.get(_get_path('networks'), {'sort_key': ['name']},
                           expect_errors=True)
        self.assertEqual(res.status_int, exc.HTTPBadRequest.code)

    def test_sort_with_invalid_attribute(self):
        instance = self.plugin.return_value
        instance.get_networks.return_value = []
        res = self.api.get(_get_path('networks'),
                           {'sort_key': 'abc',
                            'sort_dir': 'asc'},
                           expect_errors=True)
        self.assertEqual(res.status_int, exc.HTTPBadRequest.code)

    def test_sort_with_invalid_dirs(self):
        instance = self.plugin.return_value
        instance.get_networks.return_value = []
        res = self.api.get(_get_path('networks'),
                           {'sort_key': 'name',
                            'sort_dir': 'abc'},
                           expect_errors=True)
        self.assertEqual(res.status_int, exc.HTTPBadRequest.code)

    def test_emulated_sort(self):
        # Without native support, sorting args are not forwarded to the
        # plugin; the API layer emulates them.
        instance = self.plugin.return_value
        instance._NeutronPluginBaseV2__native_pagination_support = False
        instance._NeutronPluginBaseV2__native_sorting_support = False
        instance.get_networks.return_value = []
        api = webtest.TestApp(router.APIRouter())
        api.get(_get_path('networks'), {'sort_key': ['name', 'status'],
                                        'sort_dir': ['desc', 'asc']})
        kwargs = self._get_collection_kwargs(
            skipargs=['sorts', 'limit', 'marker', 'page_reverse'])
        instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)

    def test_emulated_sort_without_sort_field(self):
        # Emulated sorting must fetch the sort keys even when the client
        # only requested other fields.
        instance = self.plugin.return_value
        instance._NeutronPluginBaseV2__native_pagination_support = False
        instance._NeutronPluginBaseV2__native_sorting_support = False
        instance.get_networks.return_value = []
        api = webtest.TestApp(router.APIRouter())
        api.get(_get_path('networks'), {'sort_key': ['name', 'status'],
                                        'sort_dir': ['desc', 'asc'],
                                        'fields': ['subnets']})
        kwargs = self._get_collection_kwargs(
            skipargs=['sorts', 'limit', 'marker', 'page_reverse'],
            fields=_ArgMatcher(_list_cmp, ['name',
                                           'status',
                                           'id',
                                           'subnets',
                                           'shared',
                                           'tenant_id']))
        instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)

    def test_emulated_pagination(self):
        instance = self.plugin.return_value
        instance._NeutronPluginBaseV2__native_pagination_support = False
        instance.get_networks.return_value = []
        api = webtest.TestApp(router.APIRouter())
        api.get(_get_path('networks'), {'limit': 10,
                                        'marker': 'foo',
                                        'page_reverse': False})
        kwargs = self._get_collection_kwargs(skipargs=['limit',
                                                       'marker',
                                                       'page_reverse'])
        instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)

    def test_native_pagination_without_native_sorting(self):
        # Native pagination requires native sorting; the router refuses
        # to start otherwise.
        instance = self.plugin.return_value
        instance._NeutronPluginBaseV2__native_sorting_support = False
        self.assertRaises(q_exc.Invalid, router.APIRouter)

    def test_native_pagination_without_allow_sorting(self):
        cfg.CONF.set_override('allow_sorting', False)
        instance = self.plugin.return_value
        instance.get_networks.return_value = []
        api = webtest.TestApp(router.APIRouter())
        api.get(_get_path('networks'),
                {'sort_key': ['name', 'admin_state_up'],
                 'sort_dir': ['desc', 'asc']})
        kwargs = self._get_collection_kwargs(sorts=[('name', False),
                                                    ('admin_state_up', True),
                                                    ('id', True)])
        instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
# Note: since all resources use the same controller and validation
# logic, we actually get really good coverage from testing just networks.
class JSONV2TestCase(APIv2TestBase, testlib_api.WebTestCase):
def setUp(self):
super(JSONV2TestCase, self).setUp()
def _test_list(self, req_tenant_id, real_tenant_id):
env = {}
if req_tenant_id:
env = {'neutron.context': context.Context('', req_tenant_id)}
input_dict = {'id': uuidutils.generate_uuid(),
'name': 'net1',
'admin_state_up': True,
'status': "ACTIVE",
'tenant_id': real_tenant_id,
'shared': False,
'subnets': []}
return_value = [input_dict]
instance = self.plugin.return_value
instance.get_networks.return_value = return_value
res = self.api.get(_get_path('networks',
fmt=self.fmt), extra_environ=env)
res = self.deserialize(res)
self.assertIn('networks', res)
if not req_tenant_id or req_tenant_id == real_tenant_id:
# expect full list returned
self.assertEqual(len(res['networks']), 1)
output_dict = res['networks'][0]
input_dict['shared'] = False
self.assertEqual(len(input_dict), len(output_dict))
for k, v in input_dict.iteritems():
self.assertEqual(v, output_dict[k])
else:
# expect no results
self.assertEqual(len(res['networks']), 0)
def test_list_noauth(self):
self._test_list(None, _uuid())
def test_list_keystone(self):
tenant_id = _uuid()
self._test_list(tenant_id, tenant_id)
def test_list_keystone_bad(self):
tenant_id = _uuid()
self._test_list(tenant_id + "bad", tenant_id)
def test_list_pagination(self):
id1 = str(_uuid())
id2 = str(_uuid())
input_dict1 = {'id': id1,
'name': 'net1',
'admin_state_up': True,
'status': "ACTIVE",
'tenant_id': '',
'shared': False,
'subnets': []}
input_dict2 = {'id': id2,
'name': 'net2',
'admin_state_up': True,
'status': "ACTIVE",
'tenant_id': '',
'shared': False,
'subnets': []}
return_value = [input_dict1, input_dict2]
instance = self.plugin.return_value
instance.get_networks.return_value = return_value
params = {'limit': ['2'],
'marker': [str(_uuid())],
'sort_key': ['name'],
'sort_dir': ['asc']}
res = self.api.get(_get_path('networks'),
params=params).json
self.assertEqual(len(res['networks']), 2)
self.assertEqual(sorted([id1, id2]),
sorted([res['networks'][0]['id'],
res['networks'][1]['id']]))
self.assertIn('networks_links', res)
next_links = []
previous_links = []
for r in res['networks_links']:
if r['rel'] == 'next':
next_links.append(r)
if r['rel'] == 'previous':
previous_links.append(r)
self.assertEqual(len(next_links), 1)
self.assertEqual(len(previous_links), 1)
url = urlparse.urlparse(next_links[0]['href'])
self.assertEqual(url.path, _get_path('networks'))
params['marker'] = [id2]
self.assertEqual(urlparse.parse_qs(url.query), params)
url = urlparse.urlparse(previous_links[0]['href'])
self.assertEqual(url.path, _get_path('networks'))
params['marker'] = [id1]
params['page_reverse'] = ['True']
self.assertEqual(urlparse.parse_qs(url.query), params)
def test_list_pagination_with_last_page(self):
id = str(_uuid())
input_dict = {'id': id,
'name': 'net1',
'admin_state_up': True,
'status': "ACTIVE",
'tenant_id': '',
'shared': False,
'subnets': []}
return_value = [input_dict]
instance = self.plugin.return_value
instance.get_networks.return_value = return_value
params = {'limit': ['2'],
'marker': str(_uuid())}
res = self.api.get(_get_path('networks'),
params=params).json
self.assertEqual(len(res['networks']), 1)
self.assertEqual(id, res['networks'][0]['id'])
self.assertIn('networks_links', res)
previous_links = []
for r in res['networks_links']:
self.assertNotEqual(r['rel'], 'next')
if r['rel'] == 'previous':
previous_links.append(r)
self.assertEqual(len(previous_links), 1)
url = urlparse.urlparse(previous_links[0]['href'])
self.assertEqual(url.path, _get_path('networks'))
expect_params = params.copy()
expect_params['marker'] = [id]
expect_params['page_reverse'] = ['True']
self.assertEqual(urlparse.parse_qs(url.query), expect_params)
def test_list_pagination_with_empty_page(self):
return_value = []
instance = self.plugin.return_value
instance.get_networks.return_value = return_value
params = {'limit': ['2'],
'marker': str(_uuid())}
res = self.api.get(_get_path('networks'),
params=params).json
self.assertEqual(res['networks'], [])
previous_links = []
if 'networks_links' in res:
for r in res['networks_links']:
self.assertNotEqual(r['rel'], 'next')
if r['rel'] == 'previous':
previous_links.append(r)
self.assertEqual(len(previous_links), 1)
url = urlparse.urlparse(previous_links[0]['href'])
self.assertEqual(url.path, _get_path('networks'))
expect_params = params.copy()
del expect_params['marker']
expect_params['page_reverse'] = ['True']
self.assertEqual(urlparse.parse_qs(url.query), expect_params)
def test_list_pagination_reverse_with_last_page(self):
id = str(_uuid())
input_dict = {'id': id,
'name': 'net1',
'admin_state_up': True,
'status': "ACTIVE",
'tenant_id': '',
'shared': False,
'subnets': []}
return_value = [input_dict]
instance = self.plugin.return_value
instance.get_networks.return_value = return_value
params = {'limit': ['2'],
'marker': [str(_uuid())],
'page_reverse': ['True']}
res = self.api.get(_get_path('networks'),
params=params).json
self.assertEqual(len(res['networks']), 1)
self.assertEqual(id, res['networks'][0]['id'])
self.assertIn('networks_links', res)
next_links = []
for r in res['networks_links']:
self.assertNotEqual(r['rel'], 'previous')
if r['rel'] == 'next':
next_links.append(r)
self.assertEqual(len(next_links), 1)
url = urlparse.urlparse(next_links[0]['href'])
self.assertEqual(url.path, _get_path('networks'))
expected_params = params.copy()
del expected_params['page_reverse']
expected_params['marker'] = [id]
self.assertEqual(urlparse.parse_qs(url.query),
expected_params)
def test_list_pagination_reverse_with_empty_page(self):
return_value = []
instance = self.plugin.return_value
instance.get_networks.return_value = return_value
params = {'limit': ['2'],
'marker': [str(_uuid())],
'page_reverse': ['True']}
res = self.api.get(_get_path('networks'),
params=params).json
self.assertEqual(res['networks'], [])
next_links = []
if 'networks_links' in res:
for r in res['networks_links']:
self.assertNotEqual(r['rel'], 'previous')
if r['rel'] == 'next':
next_links.append(r)
self.assertEqual(len(next_links), 1)
url = urlparse.urlparse(next_links[0]['href'])
self.assertEqual(url.path, _get_path('networks'))
expect_params = params.copy()
del expect_params['marker']
del expect_params['page_reverse']
self.assertEqual(urlparse.parse_qs(url.query), expect_params)
def test_create(self):
net_id = _uuid()
data = {'network': {'name': 'net1', 'admin_state_up': True,
'tenant_id': _uuid()}}
return_value = {'subnets': [], 'status': "ACTIVE",
'id': net_id}
return_value.update(data['network'].copy())
instance = self.plugin.return_value
instance.create_network.return_value = return_value
instance.get_networks_count.return_value = 0
res = self.api.post(_get_path('networks', fmt=self.fmt),
self.serialize(data),
content_type='application/' + self.fmt)
self.assertEqual(res.status_int, exc.HTTPCreated.code)
res = self.deserialize(res)
self.assertIn('network', res)
net = res['network']
self.assertEqual(net['id'], net_id)
self.assertEqual(net['status'], "ACTIVE")
def test_create_use_defaults(self):
net_id = _uuid()
initial_input = {'network': {'name': 'net1', 'tenant_id': _uuid()}}
full_input = {'network': {'admin_state_up': True,
'shared': False}}
full_input['network'].update(initial_input['network'])
return_value = {'id': net_id, 'status': "ACTIVE"}
return_value.update(full_input['network'])
instance = self.plugin.return_value
instance.create_network.return_value = return_value
instance.get_networks_count.return_value = 0
res = self.api.post(_get_path('networks', fmt=self.fmt),
self.serialize(initial_input),
content_type='application/' + self.fmt)
instance.create_network.assert_called_with(mock.ANY,
network=full_input)
self.assertEqual(res.status_int, exc.HTTPCreated.code)
res = self.deserialize(res)
self.assertIn('network', res)
net = res['network']
self.assertEqual(net['id'], net_id)
self.assertEqual(net['admin_state_up'], True)
self.assertEqual(net['status'], "ACTIVE")
def test_create_no_keystone_env(self):
data = {'name': 'net1'}
res = self.api.post(_get_path('networks', fmt=self.fmt),
self.serialize(data),
content_type='application/' + self.fmt,
expect_errors=True)
self.assertEqual(res.status_int, exc.HTTPBadRequest.code)
def test_create_with_keystone_env(self):
tenant_id = _uuid()
net_id = _uuid()
env = {'neutron.context': context.Context('', tenant_id)}
# tenant_id should be fetched from env
initial_input = {'network': {'name': 'net1'}}
full_input = {'network': {'admin_state_up': True,
'shared': False, 'tenant_id': tenant_id}}
full_input['network'].update(initial_input['network'])
return_value = {'id': net_id, 'status': "ACTIVE"}
return_value.update(full_input['network'])
instance = self.plugin.return_value
instance.create_network.return_value = return_value
instance.get_networks_count.return_value = 0
res = self.api.post(_get_path('networks', fmt=self.fmt),
self.serialize(initial_input),
content_type='application/' + self.fmt,
extra_environ=env)
instance.create_network.assert_called_with(mock.ANY,
network=full_input)
self.assertEqual(res.status_int, exc.HTTPCreated.code)
def test_create_bad_keystone_tenant(self):
tenant_id = _uuid()
data = {'network': {'name': 'net1', 'tenant_id': tenant_id}}
env = {'neutron.context': context.Context('', tenant_id + "bad")}
res = self.api.post(_get_path('networks', fmt=self.fmt),
self.serialize(data),
content_type='application/' + self.fmt,
expect_errors=True,
extra_environ=env)
self.assertEqual(res.status_int, exc.HTTPBadRequest.code)
def test_create_no_body(self):
data = {'whoa': None}
res = self.api.post(_get_path('networks', fmt=self.fmt),
self.serialize(data),
content_type='application/' + self.fmt,
expect_errors=True)
self.assertEqual(res.status_int, exc.HTTPBadRequest.code)
def test_create_no_resource(self):
data = {}
res = self.api.post(_get_path('networks', fmt=self.fmt),
self.serialize(data),
content_type='application/' + self.fmt,
expect_errors=True)
self.assertEqual(res.status_int, exc.HTTPBadRequest.code)
def test_create_missing_attr(self):
data = {'port': {'what': 'who', 'tenant_id': _uuid()}}
res = self.api.post(_get_path('ports', fmt=self.fmt),
self.serialize(data),
content_type='application/' + self.fmt,
expect_errors=True)
self.assertEqual(res.status_int, 400)
def test_create_readonly_attr(self):
data = {'network': {'name': 'net1', 'tenant_id': _uuid(),
'status': "ACTIVE"}}
res = self.api.post(_get_path('networks', fmt=self.fmt),
self.serialize(data),
content_type='application/' + self.fmt,
expect_errors=True)
self.assertEqual(res.status_int, 400)
def test_create_bulk(self):
data = {'networks': [{'name': 'net1',
'admin_state_up': True,
'tenant_id': _uuid()},
{'name': 'net2',
'admin_state_up': True,
'tenant_id': _uuid()}]}
def side_effect(context, network):
net = network.copy()
net['network'].update({'subnets': []})
return net['network']
instance = self.plugin.return_value
instance.create_network.side_effect = side_effect
instance.get_networks_count.return_value = 0
res = self.api.post(_get_path('networks', fmt=self.fmt),
self.serialize(data),
content_type='application/' + self.fmt)
self.assertEqual(res.status_int, exc.HTTPCreated.code)
def test_create_bulk_no_networks(self):
data = {'networks': []}
res = self.api.post(_get_path('networks', fmt=self.fmt),
self.serialize(data),
content_type='application/' + self.fmt,
expect_errors=True)
self.assertEqual(res.status_int, exc.HTTPBadRequest.code)
def test_create_bulk_missing_attr(self):
data = {'ports': [{'what': 'who', 'tenant_id': _uuid()}]}
res = self.api.post(_get_path('ports', fmt=self.fmt),
self.serialize(data),
content_type='application/' + self.fmt,
expect_errors=True)
self.assertEqual(res.status_int, 400)
def test_create_bulk_partial_body(self):
data = {'ports': [{'device_id': 'device_1',
'tenant_id': _uuid()},
{'tenant_id': _uuid()}]}
res = self.api.post(_get_path('ports', fmt=self.fmt),
self.serialize(data),
content_type='application/' + self.fmt,
expect_errors=True)
self.assertEqual(res.status_int, 400)
def test_create_attr_not_specified(self):
net_id = _uuid()
tenant_id = _uuid()
device_id = _uuid()
initial_input = {'port': {'name': '', 'network_id': net_id,
'tenant_id': tenant_id,
'device_id': device_id,
'admin_state_up': True}}
full_input = {'port': {'admin_state_up': True,
'mac_address': attributes.ATTR_NOT_SPECIFIED,
'fixed_ips': attributes.ATTR_NOT_SPECIFIED,
'device_owner': ''}}
full_input['port'].update(initial_input['port'])
return_value = {'id': _uuid(), 'status': 'ACTIVE',
'admin_state_up': True,
'mac_address': 'ca:fe:de:ad:be:ef',
'device_id': device_id,
'device_owner': ''}
return_value.update(initial_input['port'])
instance = self.plugin.return_value
instance.get_network.return_value = {'tenant_id': unicode(tenant_id)}
instance.get_ports_count.return_value = 1
instance.create_port.return_value = return_value
res = self.api.post(_get_path('ports', fmt=self.fmt),
self.serialize(initial_input),
content_type='application/' + self.fmt)
instance.create_port.assert_called_with(mock.ANY, port=full_input)
self.assertEqual(res.status_int, exc.HTTPCreated.code)
res = self.deserialize(res)
self.assertIn('port', res)
port = res['port']
self.assertEqual(port['network_id'], net_id)
self.assertEqual(port['mac_address'], 'ca:fe:de:ad:be:ef')
def test_create_return_extra_attr(self):
net_id = _uuid()
data = {'network': {'name': 'net1', 'admin_state_up': True,
'tenant_id': _uuid()}}
return_value = {'subnets': [], 'status': "ACTIVE",
'id': net_id, 'v2attrs:something': "123"}
return_value.update(data['network'].copy())
instance = self.plugin.return_value
instance.create_network.return_value = return_value
instance.get_networks_count.return_value = 0
res = self.api.post(_get_path('networks', fmt=self.fmt),
self.serialize(data),
content_type='application/' + self.fmt)
self.assertEqual(res.status_int, exc.HTTPCreated.code)
res = self.deserialize(res)
self.assertIn('network', res)
net = res['network']
self.assertEqual(net['id'], net_id)
self.assertEqual(net['status'], "ACTIVE")
self.assertNotIn('v2attrs:something', net)
def test_fields(self):
return_value = {'name': 'net1', 'admin_state_up': True,
'subnets': []}
instance = self.plugin.return_value
instance.get_network.return_value = return_value
self.api.get(_get_path('networks',
id=uuidutils.generate_uuid(),
fmt=self.fmt))
def _test_delete(self, req_tenant_id, real_tenant_id, expected_code,
expect_errors=False):
env = {}
if req_tenant_id:
env = {'neutron.context': context.Context('', req_tenant_id)}
instance = self.plugin.return_value
instance.get_network.return_value = {'tenant_id': real_tenant_id,
'shared': False}
instance.delete_network.return_value = None
res = self.api.delete(_get_path('networks',
id=uuidutils.generate_uuid(),
fmt=self.fmt),
extra_environ=env,
expect_errors=expect_errors)
self.assertEqual(res.status_int, expected_code)
def test_delete_noauth(self):
self._test_delete(None, _uuid(), exc.HTTPNoContent.code)
def test_delete_keystone(self):
tenant_id = _uuid()
self._test_delete(tenant_id, tenant_id, exc.HTTPNoContent.code)
def test_delete_keystone_bad_tenant(self):
tenant_id = _uuid()
self._test_delete(tenant_id + "bad", tenant_id,
exc.HTTPNotFound.code, expect_errors=True)
def _test_get(self, req_tenant_id, real_tenant_id, expected_code,
expect_errors=False):
env = {}
shared = False
if req_tenant_id:
env = {'neutron.context': context.Context('', req_tenant_id)}
if req_tenant_id.endswith('another'):
shared = True
env['neutron.context'].roles = ['tenant_admin']
data = {'tenant_id': real_tenant_id, 'shared': shared}
instance = self.plugin.return_value
instance.get_network.return_value = data
res = self.api.get(_get_path('networks',
id=uuidutils.generate_uuid(),
fmt=self.fmt),
extra_environ=env,
expect_errors=expect_errors)
self.assertEqual(res.status_int, expected_code)
return res
def test_get_noauth(self):
self._test_get(None, _uuid(), 200)
def test_get_keystone(self):
tenant_id = _uuid()
self._test_get(tenant_id, tenant_id, 200)
def test_get_keystone_bad_tenant(self):
tenant_id = _uuid()
self._test_get(tenant_id + "bad", tenant_id,
exc.HTTPNotFound.code, expect_errors=True)
def test_get_keystone_shared_network(self):
tenant_id = _uuid()
self._test_get(tenant_id + "another", tenant_id, 200)
def test_get_keystone_strip_admin_only_attribute(self):
tenant_id = _uuid()
# Inject rule in policy engine
common_policy._rules['get_network:name'] = common_policy.parse_rule(
"rule:admin_only")
res = self._test_get(tenant_id, tenant_id, 200)
res = self.deserialize(res)
try:
self.assertNotIn('name', res['network'])
finally:
del common_policy._rules['get_network:name']
def _test_update(self, req_tenant_id, real_tenant_id, expected_code,
expect_errors=False):
env = {}
if req_tenant_id:
env = {'neutron.context': context.Context('', req_tenant_id)}
# leave out 'name' field intentionally
data = {'network': {'admin_state_up': True}}
return_value = {'subnets': []}
return_value.update(data['network'].copy())
instance = self.plugin.return_value
instance.get_network.return_value = {'tenant_id': real_tenant_id,
'shared': False}
instance.update_network.return_value = return_value
res = self.api.put(_get_path('networks',
id=uuidutils.generate_uuid(),
fmt=self.fmt),
self.serialize(data),
extra_environ=env,
expect_errors=expect_errors)
# Ensure id attribute is included in fields returned by GET call
# in update procedure.
self.assertEqual(1, instance.get_network.call_count)
self.assertIn('id', instance.get_network.call_args[1]['fields'])
self.assertEqual(res.status_int, expected_code)
def test_update_noauth(self):
self._test_update(None, _uuid(), 200)
def test_update_keystone(self):
tenant_id = _uuid()
self._test_update(tenant_id, tenant_id, 200)
def test_update_keystone_bad_tenant(self):
tenant_id = _uuid()
self._test_update(tenant_id + "bad", tenant_id,
exc.HTTPNotFound.code, expect_errors=True)
def test_update_readonly_field(self):
data = {'network': {'status': "NANANA"}}
res = self.api.put(_get_path('networks', id=_uuid()),
self.serialize(data),
content_type='application/' + self.fmt,
expect_errors=True)
self.assertEqual(res.status_int, 400)
def test_invalid_attribute_field(self):
data = {'network': {'invalid_key1': "foo1", 'invalid_key2': "foo2"}}
res = self.api.put(_get_path('networks', id=_uuid()),
self.serialize(data),
content_type='application/' + self.fmt,
expect_errors=True)
self.assertEqual(res.status_int, 400)
class SubresourceTest(base.BaseTestCase):
    """Exercise CRUD dispatch for sub-resources (networks/<id>/dummies)."""
    def setUp(self):
        super(SubresourceTest, self).setUp()
        plugin_cls = 'neutron.tests.unit.test_api_v2.TestSubresourcePlugin'
        # Drop cached singletons so the patched plugin is actually used.
        NeutronManager._instance = None
        PluginAwareExtensionManager._instance = None
        # Save the global RESOURCE_ATTRIBUTE_MAP so tearDown can restore it.
        self.saved_attr_map = {}
        for resource, attrs in attributes.RESOURCE_ATTRIBUTE_MAP.iteritems():
            self.saved_attr_map[resource] = attrs.copy()
        config.parse(args=['--config-file', etcdir('neutron.conf.test')])
        cfg.CONF.set_override('core_plugin', plugin_cls)
        self._plugin_patcher = mock.patch(plugin_cls, autospec=True)
        self.plugin = self._plugin_patcher.start()
        self.addCleanup(self._plugin_patcher.stop)
        self.addCleanup(cfg.CONF.reset)
        # Register the 'dummies' collection as a child of networks.
        router.SUB_RESOURCES['dummy'] = {
            'collection_name': 'dummies',
            'parent': {'collection_name': 'networks',
                       'member_name': 'network'}
        }
        attributes.RESOURCE_ATTRIBUTE_MAP['dummies'] = {
            'foo': {'allow_post': True, 'allow_put': True,
                    'validate': {'type:string': None},
                    'default': '', 'is_visible': True},
            'tenant_id': {'allow_post': True, 'allow_put': False,
                          'validate': {'type:string': None},
                          'required_by_policy': True,
                          'is_visible': True}
        }
        self.api = webtest.TestApp(router.APIRouter())
    def tearDown(self):
        router.SUB_RESOURCES = {}
        # Restore the global RESOURCE_ATTRIBUTE_MAP mutated in setUp.
        attributes.RESOURCE_ATTRIBUTE_MAP = self.saved_attr_map
        super(SubresourceTest, self).tearDown()
    def test_index_sub_resource(self):
        """GET on the collection dispatches to get_network_dummies."""
        plugin = self.plugin.return_value
        self.api.get('/networks/id1/dummies')
        plugin.get_network_dummies.assert_called_once_with(
            mock.ANY, filters=mock.ANY, fields=mock.ANY, network_id='id1')
    def test_show_sub_resource(self):
        """GET on a member dispatches to get_network_dummy."""
        plugin = self.plugin.return_value
        dummy_id = _uuid()
        self.api.get('/networks/id1' + _get_path('dummies', id=dummy_id))
        plugin.get_network_dummy.assert_called_once_with(
            mock.ANY, dummy_id, network_id='id1', fields=mock.ANY)
    def test_create_sub_resource(self):
        """POST on the collection dispatches to create_network_dummy."""
        plugin = self.plugin.return_value
        body = {'dummy': {'foo': 'bar', 'tenant_id': _uuid()}}
        self.api.post_json('/networks/id1/dummies', body)
        plugin.create_network_dummy.assert_called_once_with(
            mock.ANY, network_id='id1', dummy=body)
    def test_update_sub_resource(self):
        """PUT on a member dispatches to update_network_dummy."""
        plugin = self.plugin.return_value
        dummy_id = _uuid()
        body = {'dummy': {'foo': 'bar'}}
        self.api.put_json('/networks/id1' + _get_path('dummies', id=dummy_id),
                          body)
        plugin.update_network_dummy.assert_called_once_with(
            mock.ANY, dummy_id, network_id='id1', dummy=body)
    def test_delete_sub_resource(self):
        """DELETE on a member dispatches to delete_network_dummy."""
        plugin = self.plugin.return_value
        dummy_id = _uuid()
        self.api.delete('/networks/id1' + _get_path('dummies', id=dummy_id))
        plugin.delete_network_dummy.assert_called_once_with(
            mock.ANY, dummy_id, network_id='id1')
# Note: since all resources use the same controller and validation
# logic, we actually get really good coverage from testing just networks.
class XMLV2TestCase(JSONV2TestCase):
    # Re-run the entire JSON test suite with XML (de)serialization.
    fmt = 'xml'
class V2Views(base.BaseTestCase):
    """Verify that resource views expose exactly the declared attributes."""
    def _view(self, keys, collection, resource):
        """Render fake data through the controller and check filtering."""
        data = {key: 'value' for key in keys}
        data['fake'] = 'value'
        attr_info = attributes.RESOURCE_ATTRIBUTE_MAP[collection]
        controller = v2_base.Controller(None, collection, resource, attr_info)
        rendered = controller._view(context.get_admin_context(), data)
        # The undeclared attribute is dropped; all declared ones survive.
        self.assertNotIn('fake', rendered)
        for key in keys:
            self.assertIn(key, rendered)
    def test_network(self):
        self._view(('id', 'name', 'subnets', 'admin_state_up', 'status',
                    'tenant_id'), 'networks', 'network')
    def test_port(self):
        self._view(('id', 'network_id', 'mac_address', 'fixed_ips',
                    'device_id', 'admin_state_up', 'tenant_id', 'status'),
                   'ports', 'port')
    def test_subnet(self):
        self._view(('id', 'network_id', 'tenant_id', 'gateway_ip',
                    'ip_version', 'cidr', 'enable_dhcp'), 'subnets', 'subnet')
class NotificationTest(APIv2TestBase):
    """Check that CRUD requests emit paired start/end notifications."""
    def _resource_op_notifier(self, opname, resource, expected_errors=False,
                              notification_level='INFO'):
        """Perform *opname* on *resource*; assert both notifications fired."""
        body = {resource: {'name': 'myname'}}
        plugin = self.plugin.return_value
        plugin.get_networks.return_value = body
        plugin.get_networks_count.return_value = 0
        expected_code = exc.HTTPCreated.code
        with mock.patch.object(notifer_api, 'notify') as mynotifier:
            if opname == 'create':
                body[resource]['tenant_id'] = _uuid()
                res = self.api.post_json(
                    _get_path('networks'), body,
                    expect_errors=expected_errors)
            elif opname == 'update':
                res = self.api.put_json(
                    _get_path('networks', id=_uuid()), body,
                    expect_errors=expected_errors)
                expected_code = exc.HTTPOk.code
            elif opname == 'delete':
                body[resource]['tenant_id'] = _uuid()
                res = self.api.delete(
                    _get_path('networks', id=_uuid()),
                    expect_errors=expected_errors)
                expected_code = exc.HTTPNoContent.code
            # Exactly two notifications: <resource>.<op>.start / .end
            topic = 'network.' + cfg.CONF.host
            expected_calls = [
                mock.call(mock.ANY, topic,
                          resource + "." + opname + ".start",
                          notification_level, mock.ANY),
                mock.call(mock.ANY, topic,
                          resource + "." + opname + ".end",
                          notification_level, mock.ANY)]
            self.assertEqual(expected_calls, mynotifier.call_args_list)
        self.assertEqual(res.status_int, expected_code)
    def test_network_create_notifer(self):
        self._resource_op_notifier('create', 'network')
    def test_network_delete_notifer(self):
        self._resource_op_notifier('delete', 'network')
    def test_network_update_notifer(self):
        self._resource_op_notifier('update', 'network')
    def test_network_create_notifer_with_log_level(self):
        """The configured default_notification_level is propagated."""
        cfg.CONF.set_override('default_notification_level', 'DEBUG')
        self._resource_op_notifier('create', 'network',
                                   notification_level='DEBUG')
class QuotaTest(APIv2TestBase):
    """Checks enforcement of the per-tenant network quota."""
    def test_create_network_quota(self):
        """Creation fails once the counted usage reaches the quota."""
        cfg.CONF.set_override('quota_network', 1, group='QUOTAS')
        initial_input = {'network': {'name': 'net1', 'tenant_id': _uuid()}}
        # NOTE: the previously-built 'full_input' dict was never used by
        # these quota tests; it has been removed as dead code.
        instance = self.plugin.return_value
        instance.get_networks_count.return_value = 1
        res = self.api.post_json(
            _get_path('networks'), initial_input, expect_errors=True)
        instance.get_networks_count.assert_called_with(mock.ANY,
                                                       filters=mock.ANY)
        self.assertIn("Quota exceeded for resources",
                      res.json['NeutronError']['message'])
    def test_create_network_quota_no_counts(self):
        """Quota falls back to listing resources when counting is unsupported."""
        cfg.CONF.set_override('quota_network', 1, group='QUOTAS')
        initial_input = {'network': {'name': 'net1', 'tenant_id': _uuid()}}
        instance = self.plugin.return_value
        instance.get_networks_count.side_effect = (
            NotImplementedError())
        instance.get_networks.return_value = ["foo"]
        res = self.api.post_json(
            _get_path('networks'), initial_input, expect_errors=True)
        instance.get_networks_count.assert_called_with(mock.ANY,
                                                       filters=mock.ANY)
        self.assertIn("Quota exceeded for resources",
                      res.json['NeutronError']['message'])
    def test_create_network_quota_without_limit(self):
        """A quota of -1 disables the limit entirely."""
        cfg.CONF.set_override('quota_network', -1, group='QUOTAS')
        initial_input = {'network': {'name': 'net1', 'tenant_id': _uuid()}}
        instance = self.plugin.return_value
        instance.get_networks_count.return_value = 3
        res = self.api.post_json(
            _get_path('networks'), initial_input)
        self.assertEqual(res.status_int, exc.HTTPCreated.code)
class ExtensionTestCase(base.BaseTestCase):
    """Check that extension-defined attributes are honoured by the API."""
    def setUp(self):
        super(ExtensionTestCase, self).setUp()
        plugin_cls = 'neutron.neutron_plugin_base_v2.NeutronPluginBaseV2'
        # Ensure 'stale' patched copies of the plugin are never returned
        NeutronManager._instance = None
        # Ensure existing ExtensionManager is not used
        PluginAwareExtensionManager._instance = None
        # Save the global RESOURCE_ATTRIBUTE_MAP so tearDown can restore it
        self.saved_attr_map = {}
        for resource, attrs in attributes.RESOURCE_ATTRIBUTE_MAP.iteritems():
            self.saved_attr_map[resource] = attrs.copy()
        # Create the default configurations
        config.parse(args=['--config-file', etcdir('neutron.conf.test')])
        # Update the plugin and extensions path
        cfg.CONF.set_override('core_plugin', plugin_cls)
        cfg.CONF.set_override('api_extensions_path', EXTDIR)
        self._plugin_patcher = mock.patch(plugin_cls, autospec=True)
        self.plugin = self._plugin_patcher.start()
        # Instantiate mock plugin and enable the V2attributes extension
        NeutronManager.get_plugin().supported_extension_aliases = ["v2attrs"]
        self.api = webtest.TestApp(router.APIRouter())
    def tearDown(self):
        super(ExtensionTestCase, self).tearDown()
        self._plugin_patcher.stop()
        self.api = None
        self.plugin = None
        cfg.CONF.reset()
        # Restore the global RESOURCE_ATTRIBUTE_MAP mutated in setUp
        attributes.RESOURCE_ATTRIBUTE_MAP = self.saved_attr_map
    def test_extended_create(self):
        """Extension attributes are accepted on input and kept on output."""
        net_id = _uuid()
        initial_input = {'network': {'name': 'net1', 'tenant_id': _uuid(),
                                     'v2attrs:something_else': "abc"}}
        data = {'network': {'admin_state_up': True, 'shared': False}}
        data['network'].update(initial_input['network'])
        return_value = dict(data['network'])
        return_value.update({'subnets': [], 'status': "ACTIVE",
                             'id': net_id, 'v2attrs:something': "123"})
        plugin = self.plugin.return_value
        plugin.create_network.return_value = return_value
        plugin.get_networks_count.return_value = 0
        res = self.api.post_json(_get_path('networks'), initial_input)
        plugin.create_network.assert_called_with(mock.ANY, network=data)
        self.assertEqual(res.status_int, exc.HTTPCreated.code)
        self.assertIn('network', res.json)
        net = res.json['network']
        self.assertEqual(net['id'], net_id)
        self.assertEqual(net['status'], "ACTIVE")
        # Declared extension attribute is kept, undeclared one is stripped.
        self.assertEqual(net['v2attrs:something'], "123")
        self.assertNotIn('v2attrs:something_else', net)
class TestSubresourcePlugin():
    """Stub plugin exposing the CRUD interface for the 'dummy' sub-resource."""
    def get_network_dummies(self, context, network_id,
                            filters=None, fields=None):
        # Collection view: no dummies exist.
        return []
    def get_network_dummy(self, context, id, network_id,
                          fields=None):
        # Member view: an empty resource.
        return {}
    def create_network_dummy(self, context, network_id, dummy):
        return {}
    def update_network_dummy(self, context, id, network_id, dummy):
        return {}
    def delete_network_dummy(self, context, id, network_id):
        return
class ListArgsTestCase(base.BaseTestCase):
    """Tests for api_common.list_args query-string parsing."""
    def test_list_args(self):
        """Repeated occurrences of the argument are collected; others ignored."""
        request = webob.Request.blank('/?fields=4&foo=3&fields=2&bar=1')
        values = api_common.list_args(request, 'fields')
        self.assertEqual(sorted(values), ['2', '4'])
    def test_list_args_with_empty(self):
        """A missing argument yields an empty list."""
        request = webob.Request.blank('/?foo=4&bar=3&baz=2&qux=1')
        self.assertEqual([], api_common.list_args(request, 'fields'))
class FiltersTestCase(base.BaseTestCase):
    """Tests for api_common.get_filters query-string handling."""
    def test_all_skip_args(self):
        """Arguments named in the skip list never become filters."""
        request = webob.Request.blank('/?fields=4&fields=3&fields=2&fields=1')
        self.assertEqual({}, api_common.get_filters(request, None,
                                                    ["fields"]))
    def test_blank_values(self):
        """Blank values are dropped entirely."""
        request = webob.Request.blank('/?foo=&bar=&baz=&qux=')
        self.assertEqual({}, api_common.get_filters(request, {}))
    def test_no_attr_info(self):
        """Without attribute info every value stays a list of strings."""
        request = webob.Request.blank('/?foo=4&bar=3&baz=2&qux=1')
        expected = {'foo': ['4'], 'bar': ['3'], 'baz': ['2'], 'qux': ['1']}
        self.assertEqual(api_common.get_filters(request, {}), expected)
    def test_attr_info_without_conversion(self):
        """Attribute info without converters leaves values untouched."""
        request = webob.Request.blank('/?foo=4&bar=3&baz=2&qux=1')
        expected = {'foo': ['4'], 'bar': ['3'], 'baz': ['2'], 'qux': ['1']}
        self.assertEqual(
            api_common.get_filters(request, {'foo': {'key': 'val'}}),
            expected)
    def test_attr_info_with_convert_list_to(self):
        """convert_list_to merges repeated key=value pairs into one dict."""
        request = webob.Request.blank('/?foo=key=4&bar=3&foo=key=2&qux=1')
        attr_info = {
            'foo': {
                'convert_list_to': attributes.convert_kvp_list_to_dict,
            }
        }
        expected = {'foo': {'key': ['2', '4']}, 'bar': ['3'], 'qux': ['1']}
        self.assertEqual(api_common.get_filters(request, attr_info), expected)
    def test_attr_info_with_convert_to(self):
        """convert_to is applied to each individual value."""
        request = webob.Request.blank('/?foo=4&bar=3&baz=2&qux=1')
        attr_info = {'foo': {'convert_to': attributes.convert_to_int}}
        expected = {'foo': [4], 'bar': ['3'], 'baz': ['2'], 'qux': ['1']}
        self.assertEqual(api_common.get_filters(request, attr_info), expected)
class CreateResourceTestCase(base.BaseTestCase):
    """Sanity check for the create_resource factory."""
    def test_resource_creation(self):
        """create_resource must hand back a WSGI-wrapped controller."""
        fake_resource = v2_base.create_resource('fakes', 'fake', None, {})
        self.assertIsInstance(fake_resource, webob.dec.wsgify)
|
{
"content_hash": "a2487d51f98494c409f18f0dab8f9ee7",
"timestamp": "",
"source": "github",
"line_count": 1485,
"max_line_length": 79,
"avg_line_length": 41.96026936026936,
"alnum_prop": 0.5503201681886023,
"repo_name": "citrix-openstack-build/neutron",
"id": "3268bbac18ce92b4671d553849b6cb96930cc881",
"size": "62997",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "neutron/tests/unit/test_api_v2.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "37307"
},
{
"name": "JavaScript",
"bytes": "67928"
},
{
"name": "Python",
"bytes": "6817315"
},
{
"name": "Shell",
"bytes": "8983"
},
{
"name": "XSLT",
"bytes": "50907"
}
],
"symlink_target": ""
}
|
import errno
import json
import os
from flask import g, session
from flask_babel import Domain
from flask_pluginengine import (Plugin, PluginBlueprintMixin, PluginBlueprintSetupStateMixin, PluginEngine,
current_plugin, render_plugin_template, wrap_in_plugin_context)
from werkzeug.utils import cached_property
from indico.core import signals
from indico.core.db.sqlalchemy.util.models import get_all_models, import_all_models
from indico.core.logger import Logger
from indico.core.settings import SettingsProxy
from indico.core.webpack import IndicoManifestLoader
from indico.modules.events.settings import EventSettingsProxy
from indico.modules.events.static.util import RewrittenManifest
from indico.modules.users import UserSettingsProxy
from indico.util.decorators import cached_classproperty, classproperty
from indico.util.enum import IndicoEnum
from indico.util.i18n import NullDomain, _
from indico.web.flask.templating import get_template_module, register_template_hook
from indico.web.flask.util import url_for, url_rule_to_js
from indico.web.flask.wrappers import IndicoBlueprint, IndicoBlueprintSetupState
from indico.web.menu import SideMenuItem
from indico.web.views import WPJinjaMixin
class PluginCategory(str, IndicoEnum):
    """Admin-facing grouping of plugins; values are the translated labels."""
    search = _('Search')
    synchronization = _('Synchronization')
    payment = _('Payment')
    importers = _('Importers')
    videoconference = _('Videoconference')
    other = _('Other')
    def __str__(self):
        # Display the translated label instead of the enum member name.
        return self.value
class IndicoPlugin(Plugin):
    """Base class for an Indico plugin.
    All your plugins need to inherit from this class. It extends the
    `Plugin` class from Flask-PluginEngine with useful indico-specific
    functionality that makes it easier to write custom plugins.
    When creating your plugin, the class-level docstring is used to
    generate the friendly name and description of a plugin. Its first
    line becomes the name while everything else goes into the description.
    This class provides methods for some of the more common hooks Indico
    provides. Additional signals are defined in :mod:`~indico.core.signals`
    and can be connected to custom functions using :meth:`connect`.
    """
    #: WTForm for the plugin's settings (requires `configurable=True`).
    #: All fields must return JSON-serializable types.
    settings_form = None
    #: A dictionary which can contain the kwargs for a specific field in the `settings_form`.
    settings_form_field_opts = {}
    #: A dictionary containing default values for settings
    default_settings = {}
    #: A dictionary containing default values for event-specific settings
    default_event_settings = {}
    #: A dictionary containing default values for user-specific settings
    default_user_settings = {}
    #: A set containing the names of settings which store ACLs
    acl_settings = frozenset()
    #: A set containing the names of event-specific settings which store ACLs
    acl_event_settings = frozenset()
    #: A dict containing custom converters for settings
    settings_converters = {}
    #: A dict containing custom converters for event-specific settings
    event_settings_converters = {}
    #: A dict containing custom converters for user-specific settings
    user_settings_converters = {}
    #: If the plugin should link to a details/config page in the admin interface
    configurable = False
    #: The group category that the plugin belongs to
    category = None
    #: If `settings`, `event_settings` and `user_settings` should use strict
    #: mode, i.e. only allow keys in `default_settings`, `default_event_settings`
    #: or `default_user_settings` (or the related `acl_settings` sets).
    #: This should not be disabled in most cases; if you need to store arbitrary
    #: keys, consider storing a dict inside a single top-level setting.
    strict_settings = True
    def init(self):
        """Called when the plugin is being loaded/initialized.
        If you want to run custom initialization code, this is the
        method to override. Make sure to call the base method or
        the other overridable methods in this class will not be
        called anymore.
        """
        assert self.configurable or not self.settings_form, 'Non-configurable plugin cannot have a settings form'
        self.alembic_versions_path = os.path.join(self.root_path, 'migrations')
        self.connect(signals.plugin.get_blueprints, lambda app: self.get_blueprints())
        self.template_hook('vars-js', self.inject_vars_js)
        self._import_models()
    def _import_models(self):
        # Import all models from the plugin package and verify the newly
        # added ones only live in plugin-specific DB schemas.
        old_models = get_all_models()
        import_all_models(self.package_name)
        added_models = get_all_models() - old_models
        # Ensure that only plugin schemas have been touched. It would be nice if we could actually
        # restrict a plugin to plugin_PLUGNNAME but since we load all models from the plugin's package
        # which could contain more than one plugin this is not easily possible.
        for model in added_models:
            schema = model.__table__.schema
            # Allow models with non-plugin schema if they specify `polymorphic_identity` without a dedicated table
            if ('polymorphic_identity' in getattr(model, '__mapper_args__', ())
                    and '__tablename__' not in model.__dict__):
                continue
            if not schema.startswith('plugin_'):
                raise Exception("Plugin '{}' added a model which is not in a plugin schema ('{}' in '{}')"
                                .format(self.name, model.__name__, schema))
    def connect(self, signal, receiver, **connect_kwargs):
        """Connect *receiver* to *signal*, wrapped in this plugin's context."""
        # Force a strong reference: the context-wrapping closure created
        # below would otherwise be garbage-collected immediately.
        connect_kwargs['weak'] = False
        func = wrap_in_plugin_context(self, receiver)
        func.indico_plugin = self
        signal.connect(func, **connect_kwargs)
    def get_blueprints(self):
        """Return blueprints to be registered on the application.
        A single blueprint can be returned directly, for multiple blueprint you need
        to yield them or return an iterable.
        """
    def get_vars_js(self):
        """Return a dictionary with variables to be added to vars.js file."""
        return None
    @cached_property
    def translation_path(self):
        """Return translation files to be used by the plugin.
        By default, get <root_path>/translations, unless it does not exist.
        """
        translations_path = os.path.join(self.root_path, 'translations')
        return translations_path if os.path.exists(translations_path) else None
    @cached_property
    def translation_domain(self):
        """Return the domain for this plugin's translation_path."""
        path = self.translation_path
        return Domain(path) if path else NullDomain()
    def _get_manifest(self):
        # Load the plugin's built webpack manifest; None if the plugin has
        # no built assets (missing manifest.json).
        try:
            loader = IndicoManifestLoader(custom=False)
            return loader.load(os.path.join(self.root_path, 'static', 'dist', 'manifest.json'))
        except OSError as exc:
            if exc.errno != errno.ENOENT:
                raise
            return None
    @property
    def manifest(self):
        # During static-site generation, manifests are rewritten and cached
        # per plugin name in the request globals.
        if g.get('static_site') and 'custom_manifests' in g:
            try:
                return g.custom_manifests[self.name]
            except KeyError:
                manifest = self._get_manifest()
                g.custom_manifests[self.name] = RewrittenManifest(manifest) if manifest else None
                return g.custom_manifests[self.name]
        return self._get_manifest()
    def inject_bundle(self, name, view_class=None, subclasses=True, condition=None):
        """Inject an asset bundle into Indico's pages.
        :param name: Name of the bundle
        :param view_class: If a WP class is specified, only inject it into pages using that class
        :param subclasses: also inject into subclasses of `view_class`
        :param condition: a callable to determine whether to inject or not. only called, when the
                          view_class criterion matches
        """
        def _do_inject(sender):
            if condition is None or condition():
                try:
                    return self.manifest[name]
                except TypeError:
                    # manifest is None when the plugin assets were never built
                    raise RuntimeError(f'Assets for plugin {self.name} have not been built')
        if view_class is None:
            self.connect(signals.plugin.inject_bundle, _do_inject)
        elif not subclasses:
            self.connect(signals.plugin.inject_bundle, _do_inject, sender=view_class)
        else:
            def _func(sender):
                if issubclass(sender, view_class):
                    return _do_inject(sender)
            self.connect(signals.plugin.inject_bundle, _func)
    def inject_vars_js(self):
        """
        Return a string that will define variables for the plugin in
        the vars.js file.
        """
        vars_js = self.get_vars_js()
        if vars_js:
            return f'var {self.name.title()}Plugin = {json.dumps(vars_js)};'
    def template_hook(self, name, receiver, priority=50, markup=True):
        """Register a function to be called when a template hook is invoked.
        For details see :func:`~indico.web.flask.templating.register_template_hook`.
        """
        register_template_hook(name, receiver, priority, markup, self)
    @classproperty
    @classmethod
    def logger(cls):
        """Per-plugin logger namespaced under ``plugin.<name>``."""
        return Logger.get(f'plugin.{cls.name}')
    @cached_classproperty
    @classmethod
    def settings(cls):
        """:class:`SettingsProxy` for the plugin's settings."""
        if cls.name is None:
            raise RuntimeError('Plugin has not been loaded yet')
        instance = cls.instance
        with instance.plugin_context():  # in case the default settings come from a property
            return SettingsProxy(f'plugin_{cls.name}', instance.default_settings, cls.strict_settings,
                                 acls=cls.acl_settings, converters=cls.settings_converters)
    @cached_classproperty
    @classmethod
    def event_settings(cls):
        """:class:`EventSettingsProxy` for the plugin's event-specific settings."""
        if cls.name is None:
            raise RuntimeError('Plugin has not been loaded yet')
        instance = cls.instance
        with instance.plugin_context():  # in case the default settings come from a property
            return EventSettingsProxy(f'plugin_{cls.name}', instance.default_event_settings,
                                      cls.strict_settings, acls=cls.acl_event_settings,
                                      converters=cls.event_settings_converters)
    @cached_classproperty
    @classmethod
    def user_settings(cls):
        """:class:`UserSettingsProxy` for the plugin's user-specific settings."""
        if cls.name is None:
            raise RuntimeError('Plugin has not been loaded yet')
        instance = cls.instance
        with instance.plugin_context():  # in case the default settings come from a property
            return UserSettingsProxy(f'plugin_{cls.name}', instance.default_user_settings,
                                     cls.strict_settings, converters=cls.user_settings_converters)
def plugin_url_rule_to_js(endpoint):
    """Like :func:`~indico.web.flask.util.url_rule_to_js` but prepending plugin name prefix to the endpoint"""
    # A dot past the first character means 'blueprint.view'; bare names
    # ('foo') and relative endpoints ('.foo') stay unprefixed.
    needs_prefix = '.' in endpoint[1:]
    return url_rule_to_js(f'plugin_{endpoint}' if needs_prefix else endpoint)
def url_for_plugin(endpoint, *targets, **values):
    """Like :func:`~indico.web.flask.util.url_for` but prepending ``'plugin_'`` to the blueprint name."""
    # Bare names ('foo') and relative endpoints ('.foo') stay unprefixed.
    needs_prefix = '.' in endpoint[1:]
    return url_for(f'plugin_{endpoint}' if needs_prefix else endpoint,
                   *targets, **values)
def get_plugin_template_module(template_name, **context):
    """Like :func:`~indico.web.flask.templating.get_template_module`, but using plugin templates"""
    prefixed_name = f'{current_plugin.name}:{template_name}'
    return get_template_module(prefixed_name, **context)
class IndicoPluginEngine(PluginEngine):
    # Restrict the engine to plugins derived from IndicoPlugin.
    plugin_class = IndicoPlugin
class IndicoPluginBlueprintSetupState(PluginBlueprintSetupStateMixin, IndicoBlueprintSetupState):
    """Setup state that registers plugin static routes without the URL prefix."""
    def add_url_rule(self, rule, endpoint=None, view_func=None, **options):
        if not rule.startswith('/static'):
            super().add_url_rule(rule, endpoint, view_func, **options)
            return
        # Static asset rules are registered without the blueprint's prefix.
        with self._unprefixed():
            super().add_url_rule(rule, endpoint, view_func, **options)
class IndicoPluginBlueprint(PluginBlueprintMixin, IndicoBlueprint):
    """The Blueprint class all plugins need to use.
    It contains the necessary logic to run the blueprint's view
    functions inside the correct plugin context and to make the
    static folder work.
    """
    def make_setup_state(self, app, options, first_registration=False):
        # Use the plugin-aware setup state so /static rules skip the prefix.
        return IndicoPluginBlueprintSetupState(self, app, options, first_registration)
class WPJinjaMixinPlugin(WPJinjaMixin):
    """WPJinjaMixin variant that renders templates from the active plugin."""
    render_template_func = staticmethod(render_plugin_template)
    # This is the same value as in WPJinjaMixin but NOT redundant:
    # A plugin may have a WP inheriting from `WPJinjaMixinPlugin, WPSomethingElse`
    # to get the render_template_func from here while `WPSomethingElse`
    # already sets a template prefix and also inherits from WPJinjaMixin,
    # in which case the WPJinjaMixin from here would be skipped due to how
    # Python's MRO works and thus the template prefix would not be cleared.
    template_prefix = ''
@signals.menu.items.connect_via('admin-sidemenu')
def _extend_admin_menu(sender, **kwargs):
    """Expose the plugin admin page in the side menu, for admins only."""
    if not session.user.is_admin:
        return None
    return SideMenuItem('plugins', _('Plugins'), url_for('plugins.index'), 80, icon='puzzle')
# Singleton engine instance the application uses to load Indico plugins.
plugin_engine = IndicoPluginEngine()
|
{
"content_hash": "229e8ee7793b059b26577fa5c1c5c6d4",
"timestamp": "",
"source": "github",
"line_count": 318,
"max_line_length": 114,
"avg_line_length": 43.66352201257862,
"alnum_prop": 0.6715160244868563,
"repo_name": "DirkHoffmann/indico",
"id": "43548a68683de84c69107a15c5fe04db29cca91f",
"size": "14099",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "indico/core/plugins/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "33249"
},
{
"name": "HTML",
"bytes": "1398354"
},
{
"name": "JavaScript",
"bytes": "2295843"
},
{
"name": "Mako",
"bytes": "1527"
},
{
"name": "Python",
"bytes": "5426206"
},
{
"name": "SCSS",
"bytes": "496904"
},
{
"name": "Shell",
"bytes": "3877"
},
{
"name": "TeX",
"bytes": "23435"
},
{
"name": "XSLT",
"bytes": "1504"
}
],
"symlink_target": ""
}
|
import mock
import unittest
import responses
import base64
import json
from peewee import SqliteDatabase
from library.item import Item
# Canned OMDb API payloads keyed by title, used to stub HTTP responses.
# Use a context manager so the file handle is closed instead of leaked.
with open("responses.json") as _resps_file:
    resps = json.load(_resps_file)
class LibraryAddingTestCase(unittest.TestCase):
    """Tests for adding items to the library / wanted-list by title."""

    def setUp(self):
        # Every test gets its own fresh in-memory database.
        database = SqliteDatabase(":memory:")
        Item._meta.database = database
        Item.create_table()

    def _mock_api(self, url, body, content_type="application/json"):
        """Register a canned 200 response for GET requests to *url*."""
        responses.add(responses.GET, url, body=body,
                      status=200, content_type=content_type)

    def _mock_omdb(self, title):
        """Stub the OMDb endpoint with the canned response for *title*."""
        self._mock_api("http://omdbapi.com/", json.dumps(resps[title]))

    def _add(self, item_type, title, state, answer="y"):
        """Look up an item (answering prompts with *answer*) and set its state."""
        item = Item(item_type, title)
        with mock.patch("__builtin__.raw_input", return_value=answer):
            item.find(False)
        item.set_state(state)

    def _stored(self, item_id):
        """Return the stored row for *item_id*, or None."""
        return Item.select().where(Item.item_ID == item_id).first()

    def _count(self, item_id):
        """Return how many rows exist for *item_id*."""
        return Item.select().where(Item.item_ID == item_id).count()

    @responses.activate
    def test_have_movie(self):
        """Test adding a movie to the library"""
        self._mock_omdb("The Godfather")
        self._add("movie", "The Godfather", "have")
        assert self._stored("tt0068646").state == "have"

    @responses.activate
    def test_want_movie(self):
        """Test adding a movie to the wanted-list"""
        self._mock_omdb("The Godfather")
        self._add("movie", "The Godfather", "want")
        assert self._stored("tt0068646").state == "want"

    @responses.activate
    def test_have_existing_movie(self):
        """Test adding a movie that's already in the library"""
        self._mock_omdb("The Godfather")
        self._add("movie", "The Godfather", "have")
        assert self._count("tt0068646") == 1
        self._add("movie", "The Godfather", "have")
        assert self._count("tt0068646") == 1

    @responses.activate
    def test_have_movie_with_unicode_in_title(self):
        """Test adding a movie that has unicode in it's title"""
        self._mock_omdb(u"WALL·E")
        self._add("movie", u"WALL·E", "have")
        assert self._stored("tt0910970") is not None

    @responses.activate
    def test_have_wanted_movie(self):
        """Test adding a movie that's already in the wanted list"""
        self._mock_omdb("The Godfather")
        self._add("movie", "The Godfather", "want")
        assert self._stored("tt0068646").state == "want"
        self._add("movie", "The Godfather", "have")
        assert self._stored("tt0068646").state == "have"

    @responses.activate
    def test_have_wanted_movie_with_unicode_in_title(self):
        """Test adding a movie with unicode in the title that's already in the wanted list"""
        self._mock_omdb(u"WALL·E")
        self._add("movie", u"WALL·E", "want")
        assert self._stored("tt0910970").state == "want"
        self._add("movie", u"WALL·E", "have")
        assert self._stored("tt0910970").state == "have"

    @responses.activate
    def test_want_movie_already_in_library(self):
        """Test adding a movie that's already in the library to the wanted-list"""
        self._mock_omdb("The Godfather")
        self._add("movie", "The Godfather", "have")
        assert self._stored("tt0068646").state == "have"
        self._add("movie", "The Godfather", "want")
        assert self._stored("tt0068646").state == "want"

    @responses.activate
    def test_want_already_wanted_movie(self):
        """Test adding to the wanted-list a movie that is already wanted"""
        self._mock_omdb("The Godfather")
        self._add("movie", "The Godfather", "want")
        assert self._stored("tt0068646").state == "want"
        self._add("movie", "The Godfather", "want")
        assert self._stored("tt0068646").state == "want"

    @responses.activate
    def test_have_TV_show(self):
        """Test adding a TV show to the library"""
        self._mock_omdb("The Sopranos")
        self._add("TV show", "The Sopranos", "have")
        assert self._stored("tt0141842") is not None

    @responses.activate
    def test_have_book(self):
        """Test adding a book to the library"""
        self._mock_api("https://www.googleapis.com/books/v1/volumes",
                       json.dumps(resps["A Game of Thrones"]))
        self._add("book", "A Game of Thrones", "have", answer="3")
        assert self._stored("btpIkZ6X6egC") is not None

    @responses.activate
    def test_have_game(self):
        """Test adding a game to the library"""
        self._mock_api("http://thegamesdb.net/api/GetGamesList.php",
                       base64.b64decode(resps["FIFA 14"]["data"]),
                       content_type="text/xml")
        self._add("game", "FIFA 14", "have", answer="16")
        assert self._stored("18817") is not None
class LibraryAddingByIDTestCase(unittest.TestCase):
    """Tests for adding items to the library by provider-specific ID."""

    def setUp(self):
        # Fresh in-memory database per test.
        database = SqliteDatabase(":memory:")
        Item._meta.database = database
        Item.create_table()

    def _mock_api(self, url, body, content_type="application/json"):
        """Register a canned 200 response for GET requests to *url*."""
        responses.add(responses.GET, url, body=body,
                      status=200, content_type=content_type)

    def _add_by_id(self, item_type, item_id):
        """Look up an item by its provider ID and mark it as owned."""
        item = Item(item_type, item_id, True)
        with mock.patch("__builtin__.raw_input", return_value="y"):
            item.find(False)
        item.set_state("have")

    def _stored(self, item_id):
        """Return the stored row for *item_id*, or None."""
        return Item.select().where(Item.item_ID == item_id).first()

    @responses.activate
    def test_adding_movie_by_ID(self):
        """Test adding a movie to the library using an IMDB ID"""
        self._mock_api("http://omdbapi.com/", json.dumps(resps["tt0068646"]))
        self._add_by_id("movie", "tt0068646")
        assert self._stored("tt0068646").state == "have"

    @responses.activate
    def test_adding_TV_show_by_ID(self):
        """Test adding a TV Show to the library using an IMDB ID"""
        self._mock_api("http://omdbapi.com/", json.dumps(resps["tt0141842"]))
        self._add_by_id("TV show", "tt0141842")
        assert self._stored("tt0141842").state == "have"

    @responses.activate
    def test_adding_book_by_ID(self):
        """Test adding a book to the library using a Google Books volume ID"""
        self._mock_api("https://www.googleapis.com/books/v1/volumes/btpIkZ6X6egC",
                       json.dumps(resps["btpIkZ6X6egC"]))
        self._add_by_id("book", "btpIkZ6X6egC")
        assert self._stored("btpIkZ6X6egC") is not None

    @responses.activate
    def test_adding_game_by_ID(self):
        """Test adding a game to the library using a TheGamesDB ID"""
        self._mock_api("http://thegamesdb.net/api/GetGame.php",
                       base64.b64decode(resps["18817"]["data"]),
                       content_type="text/xml")
        self._add_by_id("game", "18817")
        assert self._stored("18817") is not None
|
{
"content_hash": "9f7f3819c23ceab488e2f39033a3131d",
"timestamp": "",
"source": "github",
"line_count": 238,
"max_line_length": 96,
"avg_line_length": 44.50420168067227,
"alnum_prop": 0.5818542296072508,
"repo_name": "U2Ft/library",
"id": "868f85cabba3c2c1664d9e09e57f2ef0a21627cf",
"size": "10622",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_adding.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "41648"
}
],
"symlink_target": ""
}
|
from django.conf import settings
from django.conf.urls import patterns, include, url
from django.contrib import admin
from django.contrib.auth.decorators import login_required
from horseradish.views import HelpView
admin.autodiscover()  # populate the admin registry from installed apps

# NOTE: patterns() is the pre-Django-1.8 URLconf style; kept as-is for
# this project's Django version.
urlpatterns = patterns('',
    url(r'^admin/', include(admin.site.urls)),
    # Help page is restricted to logged-in users.
    url(r'^help/$', login_required(HelpView.as_view()), name='horseradish.help'),
    url(r'^search/', include('haystack.urls')),
    # Catch-all prefixes: googleauth is consulted first, then photolib.
    url(r'^', include('googleauth.urls')),
    url(r'^', include('photolib.urls')),
)

# Mount the debug toolbar only in development (DEBUG=True).
if settings.DEBUG:
    import debug_toolbar
    urlpatterns += patterns('',
        url(r'^__debug__/', include(debug_toolbar.urls)),
    )
|
{
"content_hash": "82397e3844891d3b2125a9acce025bf3",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 81,
"avg_line_length": 30.454545454545453,
"alnum_prop": 0.6895522388059702,
"repo_name": "sunlightlabs/horseradish",
"id": "cd0ec72b15d55ba2458517bfd3eac95fe1fb989f",
"size": "670",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "horseradish/urls.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "28153"
},
{
"name": "JavaScript",
"bytes": "16469"
},
{
"name": "Python",
"bytes": "21060"
}
],
"symlink_target": ""
}
|
"""
The package provides different property value editor.
"""
__version__ = "$Revision-Id:$"
|
{
"content_hash": "8f5e35ffc6a6db7acba3c4a7740f794c",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 53,
"avg_line_length": 13.375,
"alnum_prop": 0.5887850467289719,
"repo_name": "DLR-SC/DataFinder",
"id": "f648b029a222e443f63d724ef8322059568e2ef0",
"size": "1801",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/datafinder/gui/user/common/widget/property/editors/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "NSIS",
"bytes": "7649"
},
{
"name": "Python",
"bytes": "7056802"
},
{
"name": "QMake",
"bytes": "1975"
}
],
"symlink_target": ""
}
|
from setuptools import setup

import app  # noqa: F401  (kept from the original script; fails fast if the package is broken)


def _read_requirements(path='requirements.txt'):
    """Return the requirement strings listed in *path*.

    Blank lines and '#' comments are skipped. Reading the file directly
    replaces the old pip.req.parse_requirements() call, which depended on
    pip internals that were removed in pip 10.
    """
    with open(path) as req_file:
        stripped = (line.strip() for line in req_file)
        return [line for line in stripped if line and not line.startswith('#')]


reqs = _read_requirements()

setup(name='app',
      version="v0.0.4",
      description='app',
      url='http://github.com/martyni/jenkins_test',
      author='martyni',
      author_email='martynjamespratt@gmail.com',
      license='MIT',
      install_requires=reqs,
      packages=['app'],
      zip_safe=False,
      entry_points={
          'console_scripts': ['boop=app:app.run'],
      },
      include_package_data=True
      )
|
{
"content_hash": "d35c4fd9ec4f56c9e5571b5b9098c590",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 68,
"avg_line_length": 27.08695652173913,
"alnum_prop": 0.6115569823434992,
"repo_name": "martyni/boop",
"id": "adaa1cf749c7940a7fd2eac5ca7e0f624ada5790",
"size": "623",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "1519"
},
{
"name": "Python",
"bytes": "9665"
},
{
"name": "Shell",
"bytes": "781"
}
],
"symlink_target": ""
}
|
"""
Copyright 2014 Sotera Defense Solutions, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
class DataConnector:
    """Abstract base class for Datawake entity-storage backends.

    Defines the interface for connection management, lookup of extracted
    entities (keyed by URL and optional domain id), and insertion of new
    entities. Every method here raises NotImplementedError; concrete
    subclasses must override all of them.

    NOTE(review): several NotImplementedError messages still use legacy
    camelCase names (e.g. "getExtractedEntitiesFromUrls") that no longer
    match the snake_case method names.
    """
    def __init__(self):
        pass
    def open(self):
        """Open the backend connection."""
        raise NotImplementedError("Implement open()")
    def close(self):
        """Close the backend connection."""
        raise NotImplementedError("Implement close()")
    def _check_conn(self):
        """Internal helper: verify the connection is usable."""
        raise NotImplementedError("Implement _checkConn()")
    def get_matching_entities_from_url(self, urls):
        """Return entities matched against *urls* (subclass-defined semantics)."""
        raise NotImplementedError("Implement get_matching_entities_from_url() ")
    def get_extracted_domain_entities_for_urls(self, domain_id, urls):
        """Return extracted entities for *urls* restricted to *domain_id*."""
        raise NotImplementedError("Implement get_extracted_domain_entities_for_urls")
    def get_extracted_entities_from_urls(self, urls, type=None):
        """Return extracted entities for *urls*, optionally filtered by *type*."""
        raise NotImplementedError("Implement getExtractedEntitiesFromUrls()")
    def get_extracted_domain_entities_from_urls(self, domain_id, urls, type=None):
        """Return extracted entities for *urls* within *domain_id*, optionally filtered by *type*."""
        raise NotImplementedError("Implement getExtractedDomainEntitiesFromUrls()")
    def get_extracted_entities_with_domain_check(self, urls, types=None, domain='default'):
        """Return extracted entities for *urls*, checked against *domain*."""
        raise NotImplementedError("Implement getExtractedEntitiesWithDomainCheck()")
    def get_domain_entity_matches(self, domain_id, type, values):
        """Return matches of *values* (of *type*) within *domain_id*."""
        raise NotImplementedError("Implement getEntityMatches()")
    def get_domain_items(self, domain_id, limit):
        """Return up to *limit* stored items for *domain_id*."""
        raise NotImplementedError("Implement get_domain_items()")
    def delete_domain_items(self, domain_id):
        """Delete all stored items for *domain_id*."""
        raise NotImplementedError("Implement delete_domain_items()")
    def add_new_domain_items(self, domain_id,features):
        """Add *features* as new items under *domain_id*."""
        raise NotImplementedError("Implement add_new_domain_items()")
    def insert_entities(self, url, entity_type, entity_values):
        """Store *entity_values* of *entity_type* extracted from *url*."""
        raise NotImplementedError("Implement insertEntities()")
    def insert_domain_entities(self, domain_id,url, entity_type, entity_values):
        """Store *entity_values* of *entity_type* from *url* under *domain_id*."""
        raise NotImplementedError("Implement insertDomainEntities()")
|
{
"content_hash": "d53b49edb3830a91f711d8ef00902a6f",
"timestamp": "",
"source": "github",
"line_count": 76,
"max_line_length": 91,
"avg_line_length": 32.171052631578945,
"alnum_prop": 0.7296523517382413,
"repo_name": "Sotera/Datawake",
"id": "c5070f6a598eb4e7e0f17a963350900e95348fea",
"size": "2445",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "server/datawake/util/dataconnector/data_connector.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "29169"
},
{
"name": "HTML",
"bytes": "45221"
},
{
"name": "JavaScript",
"bytes": "218139"
},
{
"name": "Perl",
"bytes": "3571"
},
{
"name": "Python",
"bytes": "199251"
},
{
"name": "Shell",
"bytes": "730"
}
],
"symlink_target": ""
}
|
""" Standard "encodings" Package
Standard Python encoding modules are stored in this package
directory.
Codec modules must have names corresponding to normalized encoding
names as defined in the normalize_encoding() function below, e.g.
'utf-8' must be implemented by the module 'utf_8.py'.
Each codec module must export the following interface:
* getregentry() -> codecs.CodecInfo object
The getregentry() API must return a CodecInfo object with encoder, decoder,
incrementalencoder, incrementaldecoder, streamwriter and streamreader
atttributes which adhere to the Python Codec Interface Standard.
In addition, a module may optionally also define the following
APIs which are then used by the package's codec search function:
* getaliases() -> sequence of encoding name strings to use as aliases
Alias names returned by getaliases() must be normalized encoding
names as defined by normalize_encoding().
Written by Marc-Andre Lemburg (mal@lemburg.com).
(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
"""#"
import codecs
from . import aliases
_cache = {}
_unknown = '--unknown--'
_import_tail = ['*']
_aliases = aliases.aliases
class CodecRegistryError(LookupError, SystemError):
    """Raised when a codec module registers a malformed codec entry."""
def normalize_encoding(encoding):
    """Normalize an encoding name.

    Runs of characters other than alphanumerics and '.' (the Python
    package separator) are collapsed into a single '_', and leading and
    trailing separators are dropped, e.g. '--utf--8--' becomes 'utf_8'.
    Bytes input is decoded as ASCII first. Encoding names should be
    ASCII only; any non-ASCII characters must be Latin-1 compatible.
    """
    if isinstance(encoding, bytes):
        encoding = str(encoding, "ascii")
    normalized = []
    separator_pending = False
    for ch in encoding:
        if ch.isalnum() or ch == '.':
            # Emit a single '_' for any run of punctuation, but never
            # at the very beginning of the result.
            if separator_pending and normalized:
                normalized.append('_')
            normalized.append(ch)
            separator_pending = False
        else:
            separator_pending = True
    return ''.join(normalized)
def search_function(encoding):
    """Codec search function registered with the ``codecs`` module.

    Return the cached ``codecs.CodecInfo`` for *encoding* if known;
    otherwise import the implementing module from this package, validate
    and cache its registry entry, register any aliases the module
    declares, and return the entry. Returns None (and caches the miss)
    when no suitable codec module is found.
    """
    # Cache lookup
    entry = _cache.get(encoding, _unknown)
    if entry is not _unknown:
        return entry

    # Import the module:
    #
    # First try to find an alias for the normalized encoding
    # name and lookup the module using the aliased name, then try to
    # lookup the module using the standard import scheme, i.e. first
    # try in the encodings package, then at top-level.
    #
    norm_encoding = normalize_encoding(encoding)
    aliased_encoding = _aliases.get(norm_encoding) or \
                       _aliases.get(norm_encoding.replace('.', '_'))
    if aliased_encoding is not None:
        modnames = [aliased_encoding,
                    norm_encoding]
    else:
        modnames = [norm_encoding]
    for modname in modnames:
        if not modname or '.' in modname:
            continue
        try:
            # Import is absolute to prevent the possibly malicious import of a
            # module with side-effects that is not in the 'encodings' package.
            mod = __import__('encodings.' + modname, fromlist=_import_tail,
                             level=0)
        except ImportError:
            pass
        else:
            break
    else:
        mod = None

    try:
        getregentry = mod.getregentry
    except AttributeError:
        # Not a codec module
        mod = None

    if mod is None:
        # Cache misses
        _cache[encoding] = None
        return None

    # Now ask the module for the registry entry
    entry = getregentry()
    if not isinstance(entry, codecs.CodecInfo):
        # Legacy tuple entry: validate its shape and callables, then
        # upgrade it to a CodecInfo instance.
        if not 4 <= len(entry) <= 7:
            raise CodecRegistryError('module "%s" (%s) failed to register'
                                     % (mod.__name__, mod.__file__))
        if not callable(entry[0]) or not callable(entry[1]) or \
           (entry[2] is not None and not callable(entry[2])) or \
           (entry[3] is not None and not callable(entry[3])) or \
           (len(entry) > 4 and entry[4] is not None and not callable(entry[4])) or \
           (len(entry) > 5 and entry[5] is not None and not callable(entry[5])):
            raise CodecRegistryError('incompatible codecs in module "%s" (%s)'
                                     % (mod.__name__, mod.__file__))
        if len(entry)<7 or entry[6] is None:
            entry += (None,)*(6-len(entry)) + (mod.__name__.split(".", 1)[1],)
        entry = codecs.CodecInfo(*entry)

    # Cache the codec registry entry
    _cache[encoding] = entry

    # Register its aliases (without overwriting previously registered
    # aliases)
    try:
        codecaliases = mod.getaliases()
    except AttributeError:
        pass
    else:
        for alias in codecaliases:
            if alias not in _aliases:
                _aliases[alias] = modname
    # Return the registry entry
    return entry
# Register the search_function in the Python codec registry
codecs.register(search_function)
|
{
"content_hash": "29ab653d6ffc7a08d59668e3d38fb792",
"timestamp": "",
"source": "github",
"line_count": 152,
"max_line_length": 84,
"avg_line_length": 34.33552631578947,
"alnum_prop": 0.6008813949032382,
"repo_name": "grupoprog3/proyecto_final",
"id": "21d3b869f7357843ebe4f28447f630f2714c2109",
"size": "5219",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "proyecto/flask/Lib/encodings/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "2476"
},
{
"name": "C",
"bytes": "415974"
},
{
"name": "C++",
"bytes": "129981"
},
{
"name": "CSS",
"bytes": "19219"
},
{
"name": "HTML",
"bytes": "8260"
},
{
"name": "JavaScript",
"bytes": "35399"
},
{
"name": "PowerShell",
"bytes": "16500"
},
{
"name": "Python",
"bytes": "23261737"
},
{
"name": "Tcl",
"bytes": "2570733"
}
],
"symlink_target": ""
}
|
import json
import threading
import importlib
import six
from django.conf import settings
from django.http import HttpResponse
from django.core.serializers.json import DjangoJSONEncoder
from gripcontrol import HttpStreamFormat
try:
from urllib import quote
except ImportError:
from urllib.parse import quote
tlocal = threading.local()
def have_channels():
    """Return True if Django Channels' AsyncHttpConsumer is importable."""
    try:
        from channels.generic.http import AsyncHttpConsumer  # noqa: F401
    except ImportError:
        return False
    return True
def parse_last_event_id(s):
    """Parse a 'channel:last_id,...' string into a {channel: last_id} dict."""
    pairs = (part.split(':') for part in s.split(','))
    return {channel: last_id for channel, last_id in pairs}
def make_id(ids):
    """Serialize a {channel: id} mapping into 'quoted-channel:id,...' form."""
    return ','.join('%s:%s' % (quote(channel), last_id)
                    for channel, last_id in six.iteritems(ids))
def build_id_escape(s):
    """Escape literal '%' characters in *s* by doubling them."""
    return s.replace('%', '%%')
def sse_encode_event(event_type, data, event_id=None, escape=False):
    """Encode a single Server-Sent Events message.

    Produces 'event:'/'id:'/'data:' lines terminated by a blank line.
    The 'id:' line is included only when *event_id* is truthy. When
    *escape* is set, '%' characters in the event type and data are
    doubled via build_id_escape().
    """
    if escape:
        event_type = build_id_escape(event_type)
        data = build_id_escape(data)
    lines = ['event: %s' % event_type]
    if event_id:
        lines.append('id: %s' % event_id)
    lines.append('data: %s\n\n' % data)
    return '\n'.join(lines)
def sse_error_response(condition, text, extra=None):
    """Build a text/event-stream HttpResponse carrying a 'stream-error' event.

    The JSON payload holds *condition* and *text*, merged with any
    *extra* fields (extra keys win on collision).
    """
    payload = {'condition': condition, 'text': text}
    if extra:
        payload.update(extra)
    encoded = json.dumps(payload, cls=DjangoJSONEncoder)
    body = sse_encode_event('stream-error', encoded, event_id='error')
    return HttpResponse(body, content_type='text/event-stream')
def publish_event(channel, event_type, data, pub_id, pub_prev_id,
                  skip_user_ids=None, **publish_kwargs):
    """Publish an SSE event to the 'events-<channel>' GRIP channel.

    When *pub_id* is set, the event is encoded with the '%I' id template
    and the 'build-id' content filter is requested. Users listed in
    *skip_user_ids* are added to the publish meta as 'skip_users'.
    Extra keyword arguments are forwarded to django_grip.publish().
    """
    from django_grip import publish
    content_filters = []
    if pub_id:
        event_id = '%I'
        content_filters.append('build-id')
    else:
        event_id = None
    content = sse_encode_event(event_type, data, event_id=event_id,
                               escape=bool(pub_id))
    meta = {}
    # None and [] both mean "skip nobody".
    if skip_user_ids:
        meta['skip_users'] = ','.join(skip_user_ids)
    publish(
        'events-%s' % quote(channel),
        HttpStreamFormat(content, content_filters=content_filters),
        id=pub_id,
        prev_id=pub_prev_id,
        meta=meta,
        **publish_kwargs)
def publish_kick(user_id, channel):
    """Kick *user_id* off *channel*.

    Publishes a 'stream-error' event followed by a stream close on the
    per-user 'user-<user_id>' channel; both messages carry a
    'require_sub' meta gating them on subscription to 'events-<channel>'.
    """
    from django_grip import publish
    text = 'Permission denied to channels: %s' % channel
    payload = {'condition': 'forbidden', 'text': text, 'channels': [channel]}
    content = sse_encode_event('stream-error', payload, event_id='error')
    meta = {'require_sub': 'events-%s' % channel}
    publish(
        'user-%s' % user_id,
        HttpStreamFormat(content),
        id='kick-1',
        meta=meta)
    publish(
        'user-%s' % user_id,
        HttpStreamFormat(close=True),
        id='kick-2',
        prev_id='kick-1',
        meta=meta)
def load_class(name):
    """Import the module part of dotted path *name* and return an
    instance of the named class.

    Raises ValueError when *name* contains no dot.
    """
    dot = name.rfind('.')
    if dot == -1:
        raise ValueError('class name contains no \'.\'')
    module_path = name[:dot]
    cls_name = name[dot + 1:]
    return getattr(importlib.import_module(module_path), cls_name)()
def get_class(name):
    """Return a shared instance for dotted class path *name*.

    Instances are cached in thread-local storage, so each thread loads
    a given class at most once.
    """
    if not hasattr(tlocal, 'loaded'):
        tlocal.loaded = {}
    instance = tlocal.loaded.get(name)
    if instance is None:
        instance = load_class(name)
        tlocal.loaded[name] = instance
    return instance
def get_class_from_setting(setting_name, default=None):
    """Instantiate the class configured under Django setting *setting_name*.

    Falls back to *default* (a dotted path) when the setting is absent;
    returns None when neither is available.
    """
    if hasattr(settings, setting_name):
        return get_class(getattr(settings, setting_name))
    if default:
        return get_class(default)
    return None
def get_storage():
    """Return the configured storage backend instance, or None if unset."""
    return get_class_from_setting('EVENTSTREAM_STORAGE_CLASS')
def get_channelmanager():
    """Return the configured channel manager instance.

    Defaults to django_eventstream's DefaultChannelManager when the
    setting is absent.
    """
    return get_class_from_setting(
        'EVENTSTREAM_CHANNELMANAGER_CLASS',
        'django_eventstream.channelmanager.DefaultChannelManager')
def add_default_headers(headers):
    """Set standard SSE response headers (disable caching and proxy
    buffering), then apply the configured CORS headers."""
    headers['Cache-Control'] = 'no-cache'
    headers['X-Accel-Buffering'] = 'no'
    augment_cors_headers(headers)
def augment_cors_headers(headers):
    """Apply CORS headers from the EVENTSTREAM_* Django settings.

    Each header is written only when its corresponding setting is truthy.
    """
    origin = getattr(settings, 'EVENTSTREAM_ALLOW_ORIGIN', '')
    if origin:
        headers['Access-Control-Allow-Origin'] = origin
    if getattr(settings, 'EVENTSTREAM_ALLOW_CREDENTIALS', False):
        headers['Access-Control-Allow-Credentials'] = 'true'
    allow_headers = getattr(settings, 'EVENTSTREAM_ALLOW_HEADERS', '')
    if allow_headers:
        headers['Access-Control-Allow-Headers'] = allow_headers
|
{
"content_hash": "8a6946e22463c360cbb85f1c2a939454",
"timestamp": "",
"source": "github",
"line_count": 166,
"max_line_length": 85,
"avg_line_length": 25.66867469879518,
"alnum_prop": 0.6974888523820699,
"repo_name": "fanout/django-eventstream",
"id": "1980f668df5746d99f23d1d8e06b325810053485",
"size": "4261",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "django_eventstream/utils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "22109"
},
{
"name": "Python",
"bytes": "40438"
}
],
"symlink_target": ""
}
|
from functools import partial
from robot.errors import DataError
from robot.utils import (is_dict_like, is_list_like, normalize,
RecommendationFinder)
def raise_not_found(name, candidates, msg=None):
    """Raise DataError for missing variable name.

    `name` is the variable as used (e.g. '${foo}') and `candidates` maps
    known variable base names to their current values. When `msg` is
    None, a default "Variable '<name>' not found." message is used.

    Recommendations for similar variable names are appended to the
    message if any are found.
    """
    if msg is None:
        msg = "Variable '%s' not found." % name
    candidates = _decorate_candidates(name[0], candidates)
    # Normalize aggressively (ignoring variable decoration characters,
    # case and spaces) so near-misses are still recommended.
    normalizer = partial(normalize, ignore='$@%&*{}_', caseless=True,
                         spaceless=True)
    finder = RecommendationFinder(normalizer)
    recommendations = finder.find_recommendations(name, candidates)
    msg = finder.format_recommendations(msg, recommendations)
    raise DataError(msg)
def _decorate_candidates(identifier, candidates):
    """Format candidate names as full variables of the given identifier
    ('$', '@', '&' or '%'), keeping only names whose current value fits
    that identifier's type ('@' needs list-like, '&' dict-like)."""
    type_matches = {'$': lambda value: True,
                    '@': is_list_like,
                    '&': is_dict_like,
                    '%': lambda value: True}[identifier]
    return ['%s{%s}' % (identifier, name)
            for name, value in candidates.items()
            if type_matches(value)]
|
{
"content_hash": "45befea3ced1df78c729865001b30b49",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 71,
"avg_line_length": 37.9,
"alnum_prop": 0.6358839050131926,
"repo_name": "edbrannin/robotframework",
"id": "146fc8a2d46be252baa0b2d22860cc8ff5248c6a",
"size": "1745",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "src/robot/variables/notfound.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "22850"
},
{
"name": "HTML",
"bytes": "137470"
},
{
"name": "Java",
"bytes": "59109"
},
{
"name": "JavaScript",
"bytes": "159451"
},
{
"name": "Python",
"bytes": "2063857"
},
{
"name": "RobotFramework",
"bytes": "1903686"
},
{
"name": "Shell",
"bytes": "491"
}
],
"symlink_target": ""
}
|
__author__ = 'stefanotranquillini'
|
{
"content_hash": "6a498ae8588ef2c775e51db12060be1c",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 34,
"avg_line_length": 35,
"alnum_prop": 0.7142857142857143,
"repo_name": "esseti/dododo-dadada",
"id": "1e4ded1c6a4f0c0af8627f82ccec245230701367",
"size": "35",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "do/templatetags/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "3810"
},
{
"name": "JavaScript",
"bytes": "3916"
},
{
"name": "Python",
"bytes": "30889"
}
],
"symlink_target": ""
}
|
import argparse
from os import listdir
from os.path import isfile, join
def count_fathatan(folder_path):
    """Count fathatan (tanween fath) occurrences before and after Alif.

    Scans every regular file directly inside *folder_path* and counts,
    over consecutive character pairs, how many times an Alif follows a
    fathatan (returned first) and how many times a fathatan follows an
    Alif (returned second).

    Returns a ``(before, after)`` tuple of ints.
    """
    files = [file_name for file_name in listdir(folder_path)
             if isfile(join(folder_path, file_name))]
    before = 0
    after = 0
    for file_name in files:
        # Arabic text: read explicitly as UTF-8 so the counts do not
        # depend on the platform's default locale encoding; close the
        # handle deterministically via the context manager.
        with open(join(folder_path, file_name), 'r', encoding='utf-8') as f:
            pre = ''
            for line in f:
                for ch in line:
                    if ch == u'ا' and pre == u'ً':
                        before += 1
                    elif ch == u'ً' and pre == u'ا':
                        after += 1
                    pre = ch
    return before, after
if __name__ == '__main__':
    # CLI entry point: count fathatan occurrences across all files in a folder.
    parser = argparse.ArgumentParser(description='Count fathatan before and after Alif')
    parser.add_argument('-in', '--folder-path', help='Folder path to count from all files inside it', required=True)
    args = parser.parse_args()
    before, after = count_fathatan(args.folder_path)
    print('Before Alif:', before)
    print('After Alif:', after)
|
{
"content_hash": "f947e681ef83af5c77c1e43e5ac54a2e",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 114,
"avg_line_length": 27.441176470588236,
"alnum_prop": 0.6087888531618435,
"repo_name": "AliOsm/arabic-text-diacritization",
"id": "0bc0e825d3a797e70a03aecefa4907ac1e61e7a9",
"size": "962",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "helpers/count_fathatan.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "18787"
},
{
"name": "Python",
"bytes": "24727"
}
],
"symlink_target": ""
}
|
"""Nearly exact trust-region optimization subproblem."""
from __future__ import division, print_function, absolute_import
import numpy as np
from scipy.linalg import (norm, get_lapack_funcs, solve_triangular,
cho_solve)
from ._trustregion import (_minimize_trust_region, BaseQuadraticSubproblem)
__all__ = ['_minimize_trustregion_exact',
'estimate_smallest_singular_value',
'singular_leading_submatrix',
'IterativeSubproblem']
def _minimize_trustregion_exact(fun, x0, args=(), jac=None, hess=None,
                                **trust_region_options):
    """
    Minimization of scalar function of one or more variables using
    a nearly exact trust-region algorithm.

    Options
    -------
    initial_tr_radius : float
        Initial trust-region radius.
    max_tr_radius : float
        Maximum value of the trust-region radius. No steps that are longer
        than this value will be proposed.
    eta : float
        Trust region related acceptance stringency for proposed steps.
    gtol : float
        Gradient norm must be less than ``gtol`` before successful
        termination.

    Raises
    ------
    ValueError
        If `jac` or `hess` is not supplied; both are mandatory for this
        method.
    """
    if jac is None:
        raise ValueError('Jacobian is required for trust region '
                         'exact minimization.')
    if hess is None:
        raise ValueError('Hessian matrix is required for trust region '
                         'exact minimization.')
    return _minimize_trust_region(fun, x0, args=args, jac=jac, hess=hess,
                                  subproblem=IterativeSubproblem,
                                  **trust_region_options)
def estimate_smallest_singular_value(U):
    """Estimate the smallest singular value of upper triangular ``U``.

    Returns ``(s_min, z_min)``: an estimate of the smallest singular
    value and the corresponding right singular vector, computed in
    O(n**2) operations.

    The procedure follows Cline, Moler, Stewart & Wilkinson, "An
    estimate for the condition number of a matrix", SIAM J. Numer.
    Anal. 16(2), 1979: first choose ``e`` with entries from {+1, -1}
    so that the solution ``w`` of ``U.T w = e`` is as large as
    possible, then solve ``U v = w``. Then ``s_min ~ norm(w)/norm(v)``
    and ``z_min ~ v/norm(v)``; the estimate improves the more
    ill-conditioned ``U`` is.

    Raises ValueError if ``U`` is not square.
    """
    U = np.atleast_2d(U)
    m, n = U.shape
    if m != n:
        raise ValueError("A square triangular matrix should be provided.")
    # Greedy sign selection (Golub & Van Loan, "Matrix Computations",
    # 4th ed., alg. 3.5.1, pp. 140-142, adapted for the lower
    # triangular matrix U.T): at step k pick the sign whose solution
    # component plus induced downstream growth is larger.
    Ut = U.T
    growth = np.zeros(n)
    w = np.empty(n)
    for k in range(n):
        w_plus = (1 - growth[k]) / Ut[k, k]
        w_minus = (-1 - growth[k]) / Ut[k, k]
        growth_plus = growth[k+1:] + Ut[k+1:, k]*w_plus
        growth_minus = growth[k+1:] + Ut[k+1:, k]*w_minus
        if abs(w_plus) + norm(growth_plus, 1) >= abs(w_minus) + norm(growth_minus, 1):
            w[k] = w_plus
            growth[k+1:] = growth_plus
        else:
            w[k] = w_minus
            growth[k+1:] = growth_minus
    # Backward substitution solves U v = w.
    v = solve_triangular(U, w)
    v_norm = norm(v)
    # Smallest singular value estimate and associated right vector.
    s_min = norm(w) / v_norm
    z_min = v / v_norm
    return s_min, z_min
def gershgorin_bounds(H):
    """Gershgorin lower and upper bounds on the eigenvalues of square ``H``.

    Returns ``(lb, ub)`` as defined in Conn, Gould & Toint,
    "Trust Region Methods" (2000), p. 19.
    """
    diagonal = np.diag(H)
    abs_diagonal = np.abs(diagonal)
    abs_row_sums = np.abs(H).sum(axis=1)
    lower = np.min(diagonal + abs_diagonal - abs_row_sums)
    upper = np.max(diagonal - abs_diagonal + abs_row_sums)
    return lower, upper
def singular_leading_submatrix(A, U, k):
    """Perturbation making the leading ``k`` by ``k`` submatrix of ``A`` singular.

    Parameters
    ----------
    A : ndarray
        Symmetric matrix that is not positive definite.
    U : ndarray
        Upper triangular factor from an incomplete Cholesky
        factorization of ``A``.
    k : int
        Positive index of the first non-positive-definite leading
        submatrix of ``A``.

    Returns
    -------
    delta : float
        Amount to add to ``A[k-1, k-1]`` so the leading k-by-k
        submatrix becomes singular.
    v : ndarray
        Vector satisfying ``v.T B v = 0`` where ``B`` is ``A`` with
        ``delta`` added to element (k, k).
    """
    n = len(A)
    # Shift equals the accumulated squares from the factorization minus
    # the diagonal entry where the decomposition broke down.
    delta = np.sum(U[:k-1, k-1]**2) - A[k-1, k-1]
    v = np.zeros(n)
    v[k-1] = 1
    # Fill the leading components of the null vector by solving the
    # (k-1)x(k-1) triangular system; nothing to solve when k == 1.
    if k != 1:
        v[:k-1] = solve_triangular(U[:k-1, :k-1], -U[:k-1, k-1])
    return delta, v
class IterativeSubproblem(BaseQuadraticSubproblem):
    """Quadratic subproblem solved by nearly exact iterative method.

    Notes
    -----
    This subproblem solver was based on [1]_, [2]_ and [3]_,
    which implement similar algorithms. The algorithm is basically
    that of [1]_ but ideas from [2]_ and [3]_ were also used.

    References
    ----------
    .. [1] A.R. Conn, N.I. Gould, and P.L. Toint, "Trust region methods",
           Siam, pp. 169-200, 2000.
    .. [2] J. Nocedal and S. Wright, "Numerical optimization",
           Springer Science & Business Media. pp. 83-91, 2006.
    .. [3] J.J. More and D.C. Sorensen, "Computing a trust region step",
           SIAM Journal on Scientific and Statistical Computing, vol. 4(3),
           pp. 553-572, 1983.
    """

    # UPDATE_COEFF appears in reference [1]_
    # in formula 7.3.14 (p. 190) named as "theta".
    # As recommended there its value is fixed at 0.01.
    UPDATE_COEFF = 0.01

    EPS = np.finfo(float).eps

    def __init__(self, x, fun, jac, hess, hessp=None,
                 k_easy=0.1, k_hard=0.2):

        super(IterativeSubproblem, self).__init__(x, fun, jac, hess)

        # When the trust-region shrinks in two consecutive
        # calculations (``tr_radius < previous_tr_radius``)
        # the lower bound ``lambda_lb`` may be reused,
        # facilitating the convergence. To indicate no
        # previous value is known at first ``previous_tr_radius``
        # is set to -1 and ``lambda_lb`` to None.
        self.previous_tr_radius = -1
        self.lambda_lb = None

        self.niter = 0

        # ``k_easy`` and ``k_hard`` are parameters used
        # to determine the stop criteria to the iterative
        # subproblem solver. Take a look at pp. 194-197
        # from reference _[1] for a more detailed description.
        self.k_easy = k_easy
        self.k_hard = k_hard

        # Get Lapack function for cholesky decomposition.
        # The implemented SciPy wrapper does not return
        # the incomplete factorization needed by the method.
        self.cholesky, = get_lapack_funcs(('potrf',), (self.hess,))

        # Get info about Hessian
        self.dimension = len(self.hess)
        self.hess_gershgorin_lb,\
            self.hess_gershgorin_ub = gershgorin_bounds(self.hess)
        # ``np.Inf`` was removed in NumPy 2.0; ``np.inf`` is valid everywhere.
        self.hess_inf = norm(self.hess, np.inf)
        self.hess_fro = norm(self.hess, 'fro')

        # A constant such that for vectors smaller than that
        # backward substitution is not reliable. It was established
        # based on Golub, G. H., Van Loan, C. F. (2013).
        # "Matrix computations". Forth Edition. JHU press., p.165.
        self.CLOSE_TO_ZERO = self.dimension * self.EPS * self.hess_inf

    def _initial_values(self, tr_radius):
        """Given a trust radius, return a good initial guess for
        the damping factor, the lower bound and the upper bound.
        The values were chosen accordingly to the guidelines on
        section 7.3.8 (p. 192) from [1]_.
        """

        # Upper bound for the damping factor
        lambda_ub = max(0, self.jac_mag/tr_radius + min(-self.hess_gershgorin_lb,
                                                        self.hess_fro,
                                                        self.hess_inf))

        # Lower bound for the damping factor
        lambda_lb = max(0, -min(self.hess.diagonal()),
                        self.jac_mag/tr_radius - min(self.hess_gershgorin_ub,
                                                     self.hess_fro,
                                                     self.hess_inf))

        # Improve bounds with previous info
        if tr_radius < self.previous_tr_radius:
            lambda_lb = max(self.lambda_lb, lambda_lb)

        # Initial guess for the damping factor
        if lambda_lb == 0:
            lambda_initial = 0
        else:
            lambda_initial = max(np.sqrt(lambda_lb * lambda_ub),
                                 lambda_lb + self.UPDATE_COEFF*(lambda_ub-lambda_lb))

        return lambda_initial, lambda_lb, lambda_ub

    def solve(self, tr_radius):
        """Solve quadratic subproblem"""

        lambda_current, lambda_lb, lambda_ub = self._initial_values(tr_radius)
        n = self.dimension
        hits_boundary = True
        already_factorized = False
        self.niter = 0

        while True:

            # Compute Cholesky factorization
            if already_factorized:
                already_factorized = False
            else:
                H = self.hess+lambda_current*np.eye(n)
                U, info = self.cholesky(H, lower=False,
                                        overwrite_a=False,
                                        clean=True)

            self.niter += 1

            # Check if factorization succeeded
            if info == 0 and self.jac_mag > self.CLOSE_TO_ZERO:
                # Successful factorization

                # Solve `U.T U p = s`
                p = cho_solve((U, False), -self.jac)

                p_norm = norm(p)

                # Check for interior convergence
                if p_norm <= tr_radius and lambda_current == 0:
                    hits_boundary = False
                    break

                # Solve `U.T w = p`
                w = solve_triangular(U, p, trans='T')

                w_norm = norm(w)

                # Compute Newton step according to
                # formula (4.44) p.87 from ref [2]_.
                delta_lambda = (p_norm/w_norm)**2 * (p_norm-tr_radius)/tr_radius
                lambda_new = lambda_current + delta_lambda

                if p_norm < tr_radius:  # Inside boundary
                    s_min, z_min = estimate_smallest_singular_value(U)

                    ta, tb = self.get_boundaries_intersections(p, z_min,
                                                               tr_radius)

                    # Choose `step_len` with the smallest magnitude.
                    # The reason for this choice is explained at
                    # ref [3]_, p. 6 (Immediately before the formula
                    # for `tau`).
                    step_len = min([ta, tb], key=abs)

                    # Compute the quadratic term  (p.T*H*p)
                    quadratic_term = np.dot(p, np.dot(H, p))

                    # Check stop criteria
                    relative_error = (step_len**2 * s_min**2) / (quadratic_term + lambda_current*tr_radius**2)
                    if relative_error <= self.k_hard:
                        p += step_len * z_min
                        break

                    # Update uncertainty bounds
                    lambda_ub = lambda_current
                    lambda_lb = max(lambda_lb, lambda_current - s_min**2)

                    # Compute Cholesky factorization
                    H = self.hess + lambda_new*np.eye(n)
                    c, info = self.cholesky(H, lower=False,
                                            overwrite_a=False,
                                            clean=True)

                    # Check if the factorization succeeded
                    #
                    if info == 0:  # Successful factorization
                        # Update damping factor
                        lambda_current = lambda_new
                        already_factorized = True
                    else:  # Unsuccessful factorization
                        # Update uncertainty bounds
                        lambda_lb = max(lambda_lb, lambda_new)

                        # Update damping factor
                        lambda_current = max(np.sqrt(lambda_lb * lambda_ub),
                                             lambda_lb + self.UPDATE_COEFF*(lambda_ub-lambda_lb))

                else:  # Outside boundary
                    # Check stop criteria
                    relative_error = abs(p_norm - tr_radius) / tr_radius
                    if relative_error <= self.k_easy:
                        break

                    # Update uncertainty bounds
                    lambda_lb = lambda_current

                    # Update damping factor
                    lambda_current = lambda_new

            elif info == 0 and self.jac_mag <= self.CLOSE_TO_ZERO:
                # jac_mag very close to zero

                # Check for interior convergence
                if lambda_current == 0:
                    p = np.zeros(n)
                    hits_boundary = False
                    break

                s_min, z_min = estimate_smallest_singular_value(U)
                step_len = tr_radius

                # Check stop criteria
                if step_len**2 * s_min**2 <= self.k_hard * lambda_current * tr_radius**2:
                    p = step_len * z_min
                    break

                # Update uncertainty bounds
                lambda_ub = lambda_current
                lambda_lb = max(lambda_lb, lambda_current - s_min**2)

                # Update damping factor
                lambda_current = max(np.sqrt(lambda_lb * lambda_ub),
                                     lambda_lb + self.UPDATE_COEFF*(lambda_ub-lambda_lb))

            else:  # Unsuccessful factorization

                # Compute auxiliary terms
                delta, v = singular_leading_submatrix(H, U, info)
                v_norm = norm(v)

                # Update uncertainty interval
                lambda_lb = max(lambda_lb, lambda_current + delta/v_norm**2)

                # Update damping factor
                lambda_current = max(np.sqrt(lambda_lb * lambda_ub),
                                     lambda_lb + self.UPDATE_COEFF*(lambda_ub-lambda_lb))

        self.lambda_lb = lambda_lb
        self.lambda_current = lambda_current
        self.previous_tr_radius = tr_radius

        return p, hits_boundary
|
{
"content_hash": "aa37f16194386b865a02cf4f496aa0f4",
"timestamp": "",
"source": "github",
"line_count": 432,
"max_line_length": 110,
"avg_line_length": 35.861111111111114,
"alnum_prop": 0.5331784146656339,
"repo_name": "gfyoung/scipy",
"id": "2fd1b092ea030ff0d428e08f223922379deb72f1",
"size": "15492",
"binary": false,
"copies": "8",
"ref": "refs/heads/master",
"path": "scipy/optimize/_trustregion_exact.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "4142653"
},
{
"name": "C++",
"bytes": "498142"
},
{
"name": "Fortran",
"bytes": "5572451"
},
{
"name": "Makefile",
"bytes": "778"
},
{
"name": "Matlab",
"bytes": "4346"
},
{
"name": "Python",
"bytes": "11540629"
},
{
"name": "Shell",
"bytes": "2226"
},
{
"name": "TeX",
"bytes": "52106"
}
],
"symlink_target": ""
}
|
'''
OpenShiftCLI class that wraps the oc commands in a subprocess
'''
# pylint: disable=too-many-lines
import atexit
import json
import os
import re
import shutil
import subprocess
import ruamel.yaml as yaml
#import yaml
#
## This is here because of a bug that causes yaml
## to incorrectly handle timezone info on timestamps
#def timestamp_constructor(_, node):
# '''return timestamps as strings'''
# return str(node.value)
#yaml.add_constructor(u'tag:yaml.org,2002:timestamp', timestamp_constructor)
class OpenShiftCLIError(Exception):
    '''Raised for errors encountered while driving the oc/oadm CLIs.'''
# pylint: disable=too-few-public-methods
class OpenShiftCLI(object):
    ''' Class to wrap the oc/oadm command line tools.

    All commands run through :meth:`openshift_cmd`, which shells out to
    ``/usr/bin/oc`` (or ``/usr/bin/oadm``) with KUBECONFIG pointing at
    ``self.kubeconfig``.
    '''
    def __init__(self,
                 namespace,
                 kubeconfig='/etc/origin/master/admin.kubeconfig',
                 verbose=False,
                 all_namespaces=False):
        ''' Constructor for OpenshiftCLI '''
        self.namespace = namespace
        self.verbose = verbose
        self.kubeconfig = kubeconfig
        self.all_namespaces = all_namespaces

    # Pylint allows only 5 arguments to be passed.
    # pylint: disable=too-many-arguments
    def _replace_content(self, resource, rname, content, force=False, sep='.'):
        ''' replace the current object with the content '''
        res = self._get(resource, rname)
        if not res['results']:
            return res

        fname = '/tmp/%s' % rname
        yed = Yedit(fname, res['results'][0], separator=sep)
        changes = []
        for key, value in content.items():
            changes.append(yed.put(key, value))

        if any([change[0] for change in changes]):
            yed.write()

            atexit.register(Utils.cleanup, [fname])

            return self._replace(fname, force)

        return {'returncode': 0, 'updated': False}

    def _replace(self, fname, force=False):
        '''replace a resource from a file, optionally with --force'''
        cmd = ['-n', self.namespace, 'replace', '-f', fname]
        if force:
            cmd.append('--force')
        return self.openshift_cmd(cmd)

    def _create_from_content(self, rname, content):
        '''write content to a temp file and create the resource from it'''
        fname = '/tmp/%s' % rname
        yed = Yedit(fname, content=content)
        yed.write()

        atexit.register(Utils.cleanup, [fname])

        return self._create(fname)

    def _create(self, fname):
        '''create a resource from a file'''
        return self.openshift_cmd(['create', '-f', fname, '-n', self.namespace])

    def _delete(self, resource, rname, selector=None):
        '''delete a resource by name, optionally restricted by selector'''
        cmd = ['delete', resource, rname, '-n', self.namespace]
        if selector:
            cmd.append('--selector=%s' % selector)

        return self.openshift_cmd(cmd)

    def _process(self, template_name, create=False, params=None, template_data=None):
        '''process a template, optionally creating the resulting objects'''
        cmd = ['process', '-n', self.namespace]
        if template_data:
            cmd.extend(['-f', '-'])
        else:
            cmd.append(template_name)
        if params:
            param_str = ["%s=%s" % (key, value) for key, value in params.items()]
            cmd.append('-v')
            cmd.extend(param_str)

        results = self.openshift_cmd(cmd, output=True, input_data=template_data)

        if results['returncode'] != 0 or not create:
            return results

        fname = '/tmp/%s' % template_name
        yed = Yedit(fname, results['results'])
        yed.write()

        atexit.register(Utils.cleanup, [fname])

        return self.openshift_cmd(['-n', self.namespace, 'create', '-f', fname])

    def _get(self, resource, rname=None, selector=None):
        '''return a resource by name '''
        cmd = ['get', resource]
        if selector:
            cmd.append('--selector=%s' % selector)
        if self.all_namespaces:
            cmd.extend(['--all-namespaces'])
        elif self.namespace:
            cmd.extend(['-n', self.namespace])

        cmd.extend(['-o', 'json'])

        if rname:
            cmd.append(rname)

        rval = self.openshift_cmd(cmd, output=True)

        # Ensure results are returned in an array
        # (`dict.has_key` is Python-2-only; `in` works on both.)
        if 'items' in rval:
            rval['results'] = rval['items']
        elif not isinstance(rval['results'], list):
            rval['results'] = [rval['results']]

        return rval

    def _schedulable(self, node=None, selector=None, schedulable=True):
        ''' perform oadm manage-node --schedulable '''
        cmd = ['manage-node']
        if node:
            cmd.extend(node)
        else:
            cmd.append('--selector=%s' % selector)

        cmd.append('--schedulable=%s' % schedulable)

        return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw')

    def _list_pods(self, node=None, selector=None, pod_selector=None):
        ''' perform oadm manage-node --list-pods '''
        cmd = ['manage-node']
        if node:
            cmd.extend(node)
        else:
            cmd.append('--selector=%s' % selector)

        if pod_selector:
            cmd.append('--pod-selector=%s' % pod_selector)

        cmd.extend(['--list-pods', '-o', 'json'])

        return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw')

    # pylint: disable=too-many-arguments
    def _evacuate(self, node=None, selector=None, pod_selector=None, dry_run=False, grace_period=None, force=False):
        ''' perform oadm manage-node --evacuate '''
        cmd = ['manage-node']
        if node:
            cmd.extend(node)
        else:
            cmd.append('--selector=%s' % selector)

        if dry_run:
            cmd.append('--dry-run')

        if pod_selector:
            cmd.append('--pod-selector=%s' % pod_selector)

        if grace_period:
            cmd.append('--grace-period=%s' % int(grace_period))

        if force:
            cmd.append('--force')

        cmd.append('--evacuate')

        return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw')

    def _import_image(self, url=None, name=None, tag=None):
        ''' perform image import '''
        cmd = ['import-image']

        image = '{0}'.format(name)
        if tag:
            image += ':{0}'.format(tag)

        cmd.append(image)

        if url:
            cmd.append('--from={0}/{1}'.format(url, image))

        cmd.append('-n{0}'.format(self.namespace))

        cmd.append('--confirm')
        return self.openshift_cmd(cmd)

    # pylint: disable=too-many-arguments
    def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None):
        '''Base command for oc '''
        cmds = []
        if oadm:
            cmds = ['/usr/bin/oadm']
        else:
            cmds = ['/usr/bin/oc']

        cmds.extend(cmd)

        rval = {}
        results = ''
        err = None

        if self.verbose:
            print(' '.join(cmds))

        proc = subprocess.Popen(cmds,
                                stdin=subprocess.PIPE,
                                stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE,
                                env={'KUBECONFIG': self.kubeconfig})

        stdout, stderr = proc.communicate(input_data)
        rval = {"returncode": proc.returncode,
                "results": results,
                "cmd": ' '.join(cmds),
               }

        if proc.returncode == 0:
            if output:
                if output_type == 'json':
                    try:
                        rval['results'] = json.loads(stdout)
                    except ValueError as verr:
                        # Bind a separate name: Python 3 unbinds the
                        # `except` target after the block, so reusing
                        # `err` here (as the original did) would fail.
                        if "No JSON object could be decoded" in str(verr):
                            err = str(verr)
                elif output_type == 'raw':
                    rval['results'] = stdout

            if self.verbose:
                print(stdout)
                print(stderr)

            if err:
                rval.update({"err": err,
                             "stderr": stderr,
                             "stdout": stdout,
                             "cmd": cmds
                            })

        else:
            rval.update({"stderr": stderr,
                         "stdout": stdout,
                         "results": {},
                        })

        return rval
class Utils(object):
    ''' utilities for openshiftcli modules '''
    @staticmethod
    def create_file(rname, data, ftype='yaml'):
        ''' create a file in tmp with name and contents'''
        path = os.path.join('/tmp', rname)
        with open(path, 'w') as fds:
            if ftype == 'yaml':
                fds.write(yaml.dump(data, Dumper=yaml.RoundTripDumper))

            elif ftype == 'json':
                fds.write(json.dumps(data))
            else:
                fds.write(data)

        # Register cleanup when module is done
        atexit.register(Utils.cleanup, [path])
        return path

    @staticmethod
    def create_files_from_contents(content, content_type=None):
        '''Turn an array of dict: filename, content into a files array'''
        if not isinstance(content, list):
            content = [content]
        files = []
        for item in content:
            path = Utils.create_file(item['path'], item['data'], ftype=content_type)
            files.append({'name': os.path.basename(path), 'path': path})
        return files

    @staticmethod
    def cleanup(files):
        '''Clean up on exit '''
        for sfile in files:
            if os.path.exists(sfile):
                if os.path.isdir(sfile):
                    shutil.rmtree(sfile)
                elif os.path.isfile(sfile):
                    os.remove(sfile)

    @staticmethod
    def exists(results, _name):
        ''' Check to see if the results include the name '''
        if not results:
            return False

        if Utils.find_result(results, _name):
            return True

        return False

    @staticmethod
    def find_result(results, _name):
        ''' Find the specified result by name'''
        rval = None
        for result in results:
            # `dict.has_key` is Python-2-only; `in` works on both.
            if 'metadata' in result and result['metadata']['name'] == _name:
                rval = result
                break

        return rval

    @staticmethod
    def get_resource_file(sfile, sfile_type='yaml'):
        ''' return the service file '''
        contents = None
        with open(sfile) as sfd:
            contents = sfd.read()

        if sfile_type == 'yaml':
            contents = yaml.load(contents, yaml.RoundTripLoader)
        elif sfile_type == 'json':
            contents = json.loads(contents)

        return contents

    # Disabling too-many-branches.  This is a yaml dictionary comparison function
    # pylint: disable=too-many-branches,too-many-return-statements,too-many-statements
    @staticmethod
    def check_def_equal(user_def, result_def, skip_keys=None, debug=False):
        ''' Given a user defined definition, compare it with the results given back by our query.  '''

        # Currently these values are autogenerated and we do not need to check them
        skip = ['metadata', 'status']
        if skip_keys:
            skip.extend(skip_keys)

        for key, value in result_def.items():
            if key in skip:
                continue

            # Both are lists
            if isinstance(value, list):
                if key not in user_def:
                    if debug:
                        print('User data does not have key [%s]' % key)
                        print('User data: %s' % user_def)
                    return False

                if not isinstance(user_def[key], list):
                    if debug:
                        print('user_def[key] is not a list key=[%s] user_def[key]=%s' % (key, user_def[key]))
                    return False

                if len(user_def[key]) != len(value):
                    if debug:
                        print("List lengths are not equal.")
                        print("key=[%s]: user_def[%s] != value[%s]" % (key, len(user_def[key]), len(value)))
                        print("user_def: %s" % user_def[key])
                        print("value: %s" % value)
                    return False

                for values in zip(user_def[key], value):
                    if isinstance(values[0], dict) and isinstance(values[1], dict):
                        if debug:
                            print('sending list - list')
                            print(type(values[0]))
                            print(type(values[1]))
                        result = Utils.check_def_equal(values[0], values[1], skip_keys=skip_keys, debug=debug)
                        if not result:
                            print('list compare returned false')
                            return False

                    elif value != user_def[key]:
                        if debug:
                            print('value should be identical')
                            print(value)
                            print(user_def[key])
                        return False

            # recurse on a dictionary
            elif isinstance(value, dict):
                if key not in user_def:
                    if debug:
                        print("user_def does not have key [%s]" % key)
                    return False
                if not isinstance(user_def[key], dict):
                    if debug:
                        print("dict returned false: not instance of dict")
                    return False

                # before passing ensure keys match
                api_values = set(value.keys()) - set(skip)
                user_values = set(user_def[key].keys()) - set(skip)
                if api_values != user_values:
                    if debug:
                        print("keys are not equal in dict")
                        print(api_values)
                        print(user_values)
                    return False

                result = Utils.check_def_equal(user_def[key], value, skip_keys=skip_keys, debug=debug)
                if not result:
                    if debug:
                        print("dict returned false")
                        print(result)
                    return False

            # Verify each key, value pair is the same
            else:
                if key not in user_def or value != user_def[key]:
                    if debug:
                        print("value not equal; user_def does not have key")
                        print(key)
                        print(value)
                        if key in user_def:
                            print(user_def[key])
                    return False

        if debug:
            print('returning true')
        return True
class OpenShiftCLIConfig(object):
    '''Generic container for a resource name, namespace, kubeconfig and
    a mapping of CLI options.'''
    def __init__(self, rname, namespace, kubeconfig, options):
        self.kubeconfig = kubeconfig
        self.name = rname
        self.namespace = namespace
        self._options = options

    @property
    def config_options(self):
        ''' read-only access to the raw options mapping '''
        return self._options

    def to_option_list(self):
        '''alias for :meth:`stringify`'''
        return self.stringify()

    def stringify(self):
        ''' render the included options as a list of ``--key=value`` CLI params '''
        params = []
        for opt_name, opt in self.config_options.items():
            included = opt['include']
            val = opt['value']
            # A falsy value is still emitted when it is an int (e.g. 0).
            if included and (val or isinstance(val, int)):
                params.append('--%s=%s' % (opt_name.replace('_', '-'), val))

        return params
class YeditException(Exception):
    '''Raised when Yedit cannot complete a requested operation.'''
class Yedit(object):
    ''' Class to modify yaml files '''
    re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$"
    re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z%s/_-]+)"
    com_sep = set(['.', '#', '|', ':'])

    # pylint: disable=too-many-arguments
    def __init__(self, filename=None, content=None, content_type='yaml', separator='.', backup=False):
        self.content = content
        self._separator = separator
        self.filename = filename
        self.__yaml_dict = content
        self.content_type = content_type
        self.backup = backup
        self.load(content_type=self.content_type)
        if self.__yaml_dict is None:
            self.__yaml_dict = {}

    @property
    def separator(self):
        ''' getter method for separator '''
        return self._separator

    @separator.setter
    def separator(self, value):
        ''' setter method for separator.

        The original definition took no value parameter (and returned
        instead of assigning), so ``obj.separator = x`` raised TypeError.
        '''
        self._separator = value

    @property
    def yaml_dict(self):
        ''' getter method for yaml_dict '''
        return self.__yaml_dict

    @yaml_dict.setter
    def yaml_dict(self, value):
        ''' setter method for yaml_dict '''
        self.__yaml_dict = value

    @staticmethod
    def parse_key(key, sep='.'):
        '''parse the key allowing the appropriate separator'''
        common_separators = list(Yedit.com_sep - set([sep]))
        return re.findall(Yedit.re_key % ''.join(common_separators), key)

    @staticmethod
    def valid_key(key, sep='.'):
        '''validate the incoming key'''
        common_separators = list(Yedit.com_sep - set([sep]))
        if not re.match(Yedit.re_valid_key % ''.join(common_separators), key):
            return False

        return True

    @staticmethod
    def remove_entry(data, key, sep='.'):
        ''' remove data at location key '''
        if key == '' and isinstance(data, dict):
            data.clear()
            return True
        elif key == '' and isinstance(data, list):
            del data[:]
            return True

        if not (key and Yedit.valid_key(key, sep)) and isinstance(data, (list, dict)):
            return None

        key_indexes = Yedit.parse_key(key, sep)
        for arr_ind, dict_key in key_indexes[:-1]:
            if dict_key and isinstance(data, dict):
                data = data.get(dict_key, None)
            elif arr_ind and isinstance(data, list) and int(arr_ind) <= len(data) - 1:
                data = data[int(arr_ind)]
            else:
                return None

        # process last index for remove
        # expected list entry
        if key_indexes[-1][0]:
            if isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1:
                del data[int(key_indexes[-1][0])]
                return True

        # expected dict entry
        elif key_indexes[-1][1]:
            if isinstance(data, dict):
                del data[key_indexes[-1][1]]
                return True

    @staticmethod
    def add_entry(data, key, item=None, sep='.'):
        ''' Get an item from a dictionary with key notation a.b.c
            d = {'a': {'b': 'c'}}}
            key = a#b
            return c
        '''
        if key == '':
            pass
        elif not (key and Yedit.valid_key(key, sep)) and isinstance(data, (list, dict)):
            return None

        key_indexes = Yedit.parse_key(key, sep)
        for arr_ind, dict_key in key_indexes[:-1]:
            if dict_key:
                # `dict.has_key` is Python-2-only; `in` works on both.
                if isinstance(data, dict) and dict_key in data and data[dict_key]:
                    data = data[dict_key]
                    continue

                elif data and not isinstance(data, dict):
                    return None

                data[dict_key] = {}
                data = data[dict_key]

            elif arr_ind and isinstance(data, list) and int(arr_ind) <= len(data) - 1:
                data = data[int(arr_ind)]
            else:
                return None

        if key == '':
            data = item

        # process last index for add
        # expected list entry
        elif key_indexes[-1][0] and isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1:
            data[int(key_indexes[-1][0])] = item

        # expected dict entry
        elif key_indexes[-1][1] and isinstance(data, dict):
            data[key_indexes[-1][1]] = item

        return data

    @staticmethod
    def get_entry(data, key, sep='.'):
        ''' Get an item from a dictionary with key notation a.b.c
            d = {'a': {'b': 'c'}}}
            key = a.b
            return c
        '''
        if key == '':
            pass
        elif not (key and Yedit.valid_key(key, sep)) and isinstance(data, (list, dict)):
            return None

        key_indexes = Yedit.parse_key(key, sep)
        for arr_ind, dict_key in key_indexes:
            if dict_key and isinstance(data, dict):
                data = data.get(dict_key, None)
            elif arr_ind and isinstance(data, list) and int(arr_ind) <= len(data) - 1:
                data = data[int(arr_ind)]
            else:
                return None

        return data

    def write(self):
        ''' write to file '''
        if not self.filename:
            raise YeditException('Please specify a filename.')

        if self.backup and self.file_exists():
            shutil.copy(self.filename, self.filename + '.orig')

        tmp_filename = self.filename + '.yedit'
        try:
            with open(tmp_filename, 'w') as yfd:
                # pylint: disable=no-member,maybe-no-member
                if hasattr(self.yaml_dict, 'fa'):
                    self.yaml_dict.fa.set_block_style()

                yfd.write(yaml.dump(self.yaml_dict, Dumper=yaml.RoundTripDumper))
        except Exception as err:
            # `err.message` was removed in Python 3; str(err) works on both.
            raise YeditException(str(err))

        os.rename(tmp_filename, self.filename)

        return (True, self.yaml_dict)

    def read(self):
        ''' read from file '''
        # check if it exists
        if self.filename is None or not self.file_exists():
            return None

        contents = None
        with open(self.filename) as yfd:
            contents = yfd.read()

        return contents

    def file_exists(self):
        ''' return whether file exists '''
        if os.path.exists(self.filename):
            return True

        return False

    def load(self, content_type='yaml'):
        ''' return yaml file '''
        contents = self.read()

        if not contents and not self.content:
            return None

        if self.content:
            if isinstance(self.content, dict):
                self.yaml_dict = self.content
                return self.yaml_dict
            elif isinstance(self.content, str):
                contents = self.content

        # check if it is yaml
        try:
            if content_type == 'yaml' and contents:
                self.yaml_dict = yaml.load(contents, yaml.RoundTripLoader)
                # pylint: disable=no-member,maybe-no-member
                if hasattr(self.yaml_dict, 'fa'):
                    self.yaml_dict.fa.set_block_style()
            elif content_type == 'json' and contents:
                self.yaml_dict = json.loads(contents)
        except yaml.YAMLError as err:
            # Error loading yaml or json
            raise YeditException('Problem with loading yaml file. %s' % err)

        return self.yaml_dict

    def get(self, key):
        ''' get a specified key'''
        try:
            entry = Yedit.get_entry(self.yaml_dict, key, self.separator)
        except KeyError as _:
            entry = None

        return entry

    def pop(self, path, key_or_item):
        ''' remove a key, value pair from a dict or an item for a list'''
        try:
            entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
        except KeyError as _:
            entry = None

        if entry is None:
            return (False, self.yaml_dict)

        if isinstance(entry, dict):
            # pylint: disable=no-member,maybe-no-member
            if key_or_item in entry:
                entry.pop(key_or_item)
                return (True, self.yaml_dict)
            return (False, self.yaml_dict)

        elif isinstance(entry, list):
            # pylint: disable=no-member,maybe-no-member
            ind = None
            try:
                ind = entry.index(key_or_item)
            except ValueError:
                return (False, self.yaml_dict)

            entry.pop(ind)
            return (True, self.yaml_dict)

        return (False, self.yaml_dict)

    def delete(self, path):
        ''' remove path from a dict'''
        try:
            entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
        except KeyError as _:
            entry = None

        if entry is None:
            return (False, self.yaml_dict)

        result = Yedit.remove_entry(self.yaml_dict, path, self.separator)
        if not result:
            return (False, self.yaml_dict)

        return (True, self.yaml_dict)

    def exists(self, path, value):
        ''' check if value exists at path'''
        try:
            entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
        except KeyError as _:
            entry = None

        if isinstance(entry, list):
            if value in entry:
                return True
            return False

        elif isinstance(entry, dict):
            if isinstance(value, dict):
                rval = False
                for key, val in value.items():
                    if entry[key] != val:
                        rval = False
                        break
                else:
                    rval = True

                return rval

            return value in entry

        return entry == value

    def append(self, path, value):
        '''append value to a list'''
        try:
            entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
        except KeyError as _:
            entry = None

        if entry is None:
            self.put(path, [])
            entry = Yedit.get_entry(self.yaml_dict, path, self.separator)

        if not isinstance(entry, list):
            return (False, self.yaml_dict)

        # pylint: disable=no-member,maybe-no-member
        entry.append(value)
        return (True, self.yaml_dict)

    # pylint: disable=too-many-arguments
    def update(self, path, value, index=None, curr_value=None):
        ''' put path, value into a dict '''
        try:
            entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
        except KeyError as _:
            entry = None

        if isinstance(entry, dict):
            # pylint: disable=no-member,maybe-no-member
            if not isinstance(value, dict):
                raise YeditException('Cannot replace key, value entry in dict with non-dict type.' \
                                     ' value=[%s]  [%s]' % (value, type(value)))

            entry.update(value)
            return (True, self.yaml_dict)

        elif isinstance(entry, list):
            # pylint: disable=no-member,maybe-no-member
            ind = None
            if curr_value:
                try:
                    ind = entry.index(curr_value)
                except ValueError:
                    return (False, self.yaml_dict)

            elif index is not None:
                ind = index

            if ind is not None and entry[ind] != value:
                entry[ind] = value
                return (True, self.yaml_dict)

            # see if it exists in the list
            try:
                ind = entry.index(value)
            except ValueError:
                # doesn't exist, append it
                entry.append(value)
                return (True, self.yaml_dict)

            # already exists, return
            if ind is not None:
                return (False, self.yaml_dict)
        return (False, self.yaml_dict)

    def put(self, path, value):
        ''' put path, value into a dict '''
        try:
            entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
        except KeyError as _:
            entry = None

        if entry == value:
            return (False, self.yaml_dict)

        # deepcopy didn't work
        tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict, default_flow_style=False), yaml.RoundTripLoader)
        # pylint: disable=no-member
        if hasattr(self.yaml_dict, 'fa'):
            tmp_copy.fa.set_block_style()
        result = Yedit.add_entry(tmp_copy, path, value, self.separator)
        if not result:
            return (False, self.yaml_dict)

        self.yaml_dict = tmp_copy

        return (True, self.yaml_dict)

    def create(self, path, value):
        ''' create a yaml file '''
        if not self.file_exists():
            # deepcopy didn't work
            tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict, default_flow_style=False), yaml.RoundTripLoader)
            # pylint: disable=no-member
            if hasattr(self.yaml_dict, 'fa'):
                tmp_copy.fa.set_block_style()
            result = Yedit.add_entry(tmp_copy, path, value, self.separator)
            if result:
                self.yaml_dict = tmp_copy
                return (True, self.yaml_dict)

        return (False, self.yaml_dict)
class OCSecretAdd(OpenShiftCLI):
    ''' Manage the secrets attached to a service account via oc. '''
    kind = 'sa'

    # pylint allows 5. we need 6
    # pylint: disable=too-many-arguments
    def __init__(self,
                 config,
                 verbose=False):
        ''' Constructor for OpenshiftOC '''
        super(OCSecretAdd, self).__init__(config.namespace, config.kubeconfig)
        self.config = config
        self.verbose = verbose
        self._service_account = None

    @property
    def service_account(self):
        ''' lazily fetched ServiceAccount wrapper for the configured name '''
        if not self._service_account:
            self.get()
        return self._service_account

    @service_account.setter
    def service_account(self, data):
        ''' setter for the cached ServiceAccount wrapper '''
        self._service_account = data

    def exists(self, in_secret):
        ''' return whether the named secret is attached to the service account '''
        result = self.service_account.find_secret(in_secret)
        if not result:
            return False

        return True

    def get(self):
        '''fetch the service account and cache a ServiceAccount wrapper'''
        env = self._get(OCSecretAdd.kind, self.config.name)
        if env['returncode'] == 0:
            self.service_account = ServiceAccount(content=env['results'][0])
            env['results'] = self.service_account.get('secrets')
        return env

    def delete(self):
        '''detach every secret from the service account'''
        modified = []
        # Iterate over a snapshot: delete_secret mutates the underlying
        # list, so iterating it directly would skip every other entry.
        # Each entry is a {'name': ...} dict, while delete_secret compares
        # against the bare name — pass rem_secret['name'], not the dict.
        for rem_secret in list(self.service_account.secrets):
            modified.append(self.service_account.delete_secret(rem_secret['name']))

        if any(modified):
            return self._replace_content(OCSecretAdd.kind, self.config.name, self.service_account.yaml_dict)

        return {'returncode': 0, 'changed': False}

    def put(self):
        '''attach the configured secrets to the service account'''
        modified = False
        for add_secret in self.config.secrets:
            if not self.service_account.find_secret(add_secret):
                self.service_account.add_secret(add_secret)
                modified = True

        if modified:
            return self._replace_content(OCSecretAdd.kind, self.config.name, self.service_account.yaml_dict)

        return {'returncode': 0, 'changed': False}
class ServiceAccountConfig(object):
    '''Service account config class

       This class stores the options and returns a default service account
    '''

    # pylint: disable=too-many-arguments
    def __init__(self, sname, namespace, kubeconfig, secrets=None, image_pull_secrets=None):
        self.name = sname
        self.kubeconfig = kubeconfig
        self.namespace = namespace
        self.secrets = secrets or []
        self.image_pull_secrets = image_pull_secrets or []
        self.data = {}
        self.create_dict()

    def create_dict(self):
        ''' assemble the v1 ServiceAccount resource body into self.data '''
        self.data = {
            'apiVersion': 'v1',
            'kind': 'ServiceAccount',
            'metadata': {
                'name': self.name,
                'namespace': self.namespace,
            },
            # secrets / imagePullSecrets are lists of {"name": ...} refs
            'secrets': [{"name": sec} for sec in self.secrets],
            'imagePullSecrets': [{"name": sec} for sec in self.image_pull_secrets],
        }
# pylint: disable=too-many-public-methods
class ServiceAccount(Yedit):
    ''' Yedit wrapper over a v1 ServiceAccount resource body. '''
    image_pull_secrets_path = "imagePullSecrets"
    secrets_path = "secrets"

    def __init__(self, content):
        '''ServiceAccount constructor'''
        super(ServiceAccount, self).__init__(content=content)
        self._secrets = None
        self._image_pull_secrets = None

    @property
    def image_pull_secrets(self):
        ''' property for image_pull_secrets '''
        if self._image_pull_secrets is None:
            self._image_pull_secrets = self.get(ServiceAccount.image_pull_secrets_path) or []
        return self._image_pull_secrets

    @image_pull_secrets.setter
    def image_pull_secrets(self, secrets):
        ''' setter for image_pull_secrets '''
        self._image_pull_secrets = secrets

    @property
    def secrets(self):
        ''' property for secrets '''
        # (leftover debug print removed — it wrote to stdout on every access)
        if not self._secrets:
            self._secrets = self.get(ServiceAccount.secrets_path) or []
        return self._secrets

    @secrets.setter
    def secrets(self, secrets):
        ''' setter for secrets '''
        self._secrets = secrets

    def delete_secret(self, inc_secret):
        ''' remove the named secret; return True when one was removed '''
        remove_idx = None
        for idx, sec in enumerate(self.secrets):
            if sec['name'] == inc_secret:
                remove_idx = idx
                break

        # Compare against None explicitly: a bare `if remove_idx:` is
        # falsy for index 0, which made the first secret undeletable.
        if remove_idx is not None:
            del self.secrets[remove_idx]
            return True

        return False

    def delete_image_pull_secret(self, inc_secret):
        ''' remove the named image_pull_secret; return True when one was removed '''
        remove_idx = None
        for idx, sec in enumerate(self.image_pull_secrets):
            if sec['name'] == inc_secret:
                remove_idx = idx
                break

        # Same index-0 fix as delete_secret.
        if remove_idx is not None:
            del self.image_pull_secrets[remove_idx]
            return True

        return False

    def find_secret(self, inc_secret):
        '''find the named secret; return the {'name': ...} entry or None'''
        for secret in self.secrets:
            if secret['name'] == inc_secret:
                return secret

        return None

    def find_image_pull_secret(self, inc_secret):
        '''find the named image_pull_secret; return the entry or None'''
        for secret in self.image_pull_secrets:
            if secret['name'] == inc_secret:
                return secret

        return None

    def add_secret(self, inc_secret):
        '''add a secret reference by name'''
        if self.secrets:
            self.secrets.append({"name": inc_secret})  # pylint: disable=no-member
        else:
            self.put(ServiceAccount.secrets_path, [{"name": inc_secret}])

    def add_image_pull_secret(self, inc_secret):
        '''add an image_pull_secret reference by name'''
        if self.image_pull_secrets:
            self.image_pull_secrets.append({"name": inc_secret})  # pylint: disable=no-member
        else:
            self.put(ServiceAccount.image_pull_secrets_path, [{"name": inc_secret}])
def main():
    '''
    ansible oc module entry point for service account secrets.

    Supports state=list (report), state=absent (remove the listed secrets
    from the service account) and state=present (add any missing ones).
    '''
    module = AnsibleModule(
        argument_spec=dict(
            kubeconfig=dict(default='/etc/origin/master/admin.kubeconfig', type='str'),
            state=dict(default='present', type='str',
                       choices=['present', 'absent', 'list']),
            debug=dict(default=False, type='bool'),
            kind=dict(default='rc', choices=['dc', 'rc', 'pods'], type='str'),
            namespace=dict(default='default', type='str'),
            secrets=dict(default=None, type='list'),
            service_account=dict(default=None, type='str'),
        ),
        supports_check_mode=True,
    )

    sconfig = ServiceAccountConfig(module.params['service_account'],
                                   module.params['namespace'],
                                   module.params['kubeconfig'],
                                   module.params['secrets'],
                                   None)

    oc_secret_add = OCSecretAdd(sconfig,
                                verbose=module.params['debug'])

    state = module.params['state']

    api_rval = oc_secret_add.get()

    # BUG FIX: `secrets` defaults to None, so `.get('secrets', [])` returned
    # None (the key exists) and iterating it raised TypeError. `or []` turns
    # a missing/None value into an empty iteration instead.
    secrets = module.params.get('secrets') or []

    #####
    # Get
    #####
    if state == 'list':
        module.exit_json(changed=False, results=api_rval['results'], state="list")

    ########
    # Delete
    ########
    if state == 'absent':
        for secret in secrets:
            if oc_secret_add.exists(secret):
                if module.check_mode:
                    module.exit_json(changed=False, msg='Would have performed a delete.')
                api_rval = oc_secret_add.delete()
                module.exit_json(changed=True, results=api_rval, state="absent")
        module.exit_json(changed=False, state="absent")

    if state == 'present':
        ########
        # Create
        ########
        for secret in secrets:
            if not oc_secret_add.exists(secret):
                if module.check_mode:
                    module.exit_json(changed=False, msg='Would have performed a create.')

                # Create it here
                api_rval = oc_secret_add.put()
                if api_rval['returncode'] != 0:
                    module.fail_json(msg=api_rval)

                # return the created object
                api_rval = oc_secret_add.get()
                if api_rval['returncode'] != 0:
                    module.fail_json(msg=api_rval)

                module.exit_json(changed=True, results=api_rval, state="present")
        module.exit_json(changed=False, results=api_rval, state="present")

    module.exit_json(failed=True,
                     changed=False,
                     results='Unknown state passed. %s' % state,
                     state="unknown")
# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import, locally-disabled
# import module snippets. This are required
from ansible.module_utils.basic import *
main()
|
{
"content_hash": "676eb833a5c3a63e7fe66e156acfffec",
"timestamp": "",
"source": "github",
"line_count": 1173,
"max_line_length": 118,
"avg_line_length": 32.690537084398976,
"alnum_prop": 0.5285818599071611,
"repo_name": "jupierce/openshift-tools",
"id": "a64e47ff6d59cd511ba8ef364c57931a4995c1fd",
"size": "38757",
"binary": false,
"copies": "3",
"ref": "refs/heads/prod",
"path": "ansible/roles/lib_openshift_3.2/library/oc_secret_add.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ApacheConf",
"bytes": "107055"
},
{
"name": "CSS",
"bytes": "588"
},
{
"name": "Groovy",
"bytes": "6322"
},
{
"name": "HTML",
"bytes": "14650"
},
{
"name": "JavaScript",
"bytes": "229"
},
{
"name": "PHP",
"bytes": "35735"
},
{
"name": "Python",
"bytes": "7239555"
},
{
"name": "Shell",
"bytes": "579824"
},
{
"name": "Vim script",
"bytes": "1836"
}
],
"symlink_target": ""
}
|
import json
from core.err_code import err_desc_ch, OCT_SUCCESS, err_desc_en
from core.log import ERROR
from views.api.center.api import PARAM_NOT_NULL, PARAM_TYPE_INT, PARAM_TYPE_STRING
def appendBaseArg(argObj, request):
    """Attach the caller's address info and a 'paras' slot to an arg dict.

    Mutates and returns *argObj* (a fresh dict is created when it is falsy).
    LAST_ADDR comes from the direct peer, REMOTE_ADDR from the X-Real-Ip
    header set by a reverse proxy (empty string when absent).
    """
    if not argObj:
        argObj = {}
    argObj["LAST_ADDR"] = request.remote_ip
    argObj["REMOTE_ADDR"] = request.headers.get("X-Real-Ip") or ""
    # setdefault keeps an existing 'paras' payload untouched
    argObj.setdefault("paras", {})
    return argObj
def getArgObj(request):
    """Decode the request body as JSON and append the base arguments.

    Empty or non-JSON bodies produce just the base arguments; a body that
    fails to parse is logged and likewise falls back to the base arguments.
    """
    argObj = {}
    arg = str(request.body, encoding="utf-8")
    # BUG FIX: JSON documents start with '{' or '[' — the check used ']' by
    # mistake, so JSON-array bodies were silently treated as empty.
    if not arg or arg[0] not in ("{", "["):
        return appendBaseArg(argObj, request)
    try:
        # `arg` is always str here (decoded above), so no re-encoding needed
        argObj = json.loads(arg)
    except ValueError:
        ERROR("got bad json request")
    return appendBaseArg(argObj, request)
def buildFailureReply(errorNo, errorMsg=None):
    """Serialize an error-only reply envelope (no data) to a JSON string."""
    error_obj = {
        "errorNo": errorNo,
        "errorMsg": errorMsg or err_desc_ch.get(errorNo),
        "errorMsgEN": err_desc_en.get(errorNo),
    }
    return json.dumps({"errorObj": error_obj, "data": None})
def buildAsyncReply(res):
    """Serialize an async API task result envelope to a JSON string."""
    task = res["RetObj"]
    code = res["RetCode"]
    reply = {
        "errorObj": {
            "errorNo": code,
            "errorMsg": res.get("RetMsg") or err_desc_ch.get(code),
            "errorMsgEN": err_desc_en.get(code),
        },
        "session": {
            "uuid": res["session"]["id"],
        },
        "createTime": task["createTime"],
        "finishTime": task["finishTime"],
        "apiId": task["id"],
        "apiName": task["apiName"],
        "state": task["state"],
        "result": task["result"],
    }
    return json.dumps(reply)
def checkParas(paras, apiProto):
    """Validate *paras* against the prototype, mutating it in place.

    A parameter whose prototype default is PARAM_NOT_NULL is required (a
    missing or falsy value fails validation); otherwise the default value is
    filled in. Integer-typed parameters are coerced with int().

    Returns (True, None) on success or (False, error_message) on failure.
    """
    for name, spec in apiProto["paras"].items():
        required = spec["default"] == PARAM_NOT_NULL
        if not required and name not in paras:
            paras[name] = spec["default"]
        if required and not paras.get(name):
            return False, "paras '%s' must be specified" % name
        if spec["type"] == PARAM_TYPE_INT and spec["type"]:
            paras[name] = int(paras[name])
    return True, None
def buildReply(res):
    """Serialize a synchronous API reply envelope to a JSON string.

    The session block is only attached on success and only when the result
    carries a session.
    """
    code = res["RetCode"]
    reply = {
        "errorObj": {
            "errorNo": code,
            "errorMsg": res.get("RetMsg") or err_desc_ch.get(code),
            "errorMsgEN": err_desc_en.get(code),
            "errorLog": res["ErrorLog"],
        },
        "data": res["RetObj"],
        "apiId": res.get("apiId"),
    }
    if code == OCT_SUCCESS and res.get("session"):
        reply["session"] = {"uuid": res["session"].get("id")}
    return json.dumps(reply)
|
{
"content_hash": "e098572468181d4161ee1d810d542b21",
"timestamp": "",
"source": "github",
"line_count": 115,
"max_line_length": 82,
"avg_line_length": 22.06086956521739,
"alnum_prop": 0.6342136381553015,
"repo_name": "maqg/wcrobot",
"id": "f1e8b1d658c41e26f8785a965c4ee505ec3a7610",
"size": "2580",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tornado/utils/httpUtil.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "871"
},
{
"name": "HTML",
"bytes": "58362"
},
{
"name": "JavaScript",
"bytes": "23121"
},
{
"name": "Makefile",
"bytes": "630"
},
{
"name": "PLpgSQL",
"bytes": "16632"
},
{
"name": "Perl",
"bytes": "2679"
},
{
"name": "Python",
"bytes": "156673"
},
{
"name": "Shell",
"bytes": "13165"
},
{
"name": "TypeScript",
"bytes": "12074"
}
],
"symlink_target": ""
}
|
from citrination_client.views.descriptors.descriptor import MaterialDescriptor
class AlloyCompositionDescriptor(MaterialDescriptor):
    """Material descriptor for an alloy composition column."""

    def __init__(self, key, balance_element, basis=100, threshold=None):
        self.options = {
            "balance_element": balance_element,
            "basis": basis,
            "threshold": threshold,
        }
        super(AlloyCompositionDescriptor, self).__init__(key, "Alloy composition")
|
{
"content_hash": "aba18bc9d81ca837221171bd858c8541",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 94,
"avg_line_length": 43.111111111111114,
"alnum_prop": 0.7680412371134021,
"repo_name": "CitrineInformatics/python-citrination-client",
"id": "45fdf26f35415ba6bd25905a2fd4e3673c0d9288",
"size": "388",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "citrination_client/views/descriptors/alloy_composition_descriptor.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "391180"
}
],
"symlink_target": ""
}
|
import dotslash
# Export a dotslash launcher for the server_callable metadata extraction
# tool built from the fbcode target below; the generated launcher file is
# written to the checked-in path given by `generated_dotslash_file`.
dotslash.export_fbcode_build(
    target="//flow/src/facebook/server_callable:server_callable_extract_metadata",
    oncall="staticresources",
    generated_dotslash_file="flib/intern/build/meerkat/steps/server_callable/bin/server_callable_extract_metadata",
)
|
{
"content_hash": "e4d4edaeec96512faf9cad42ec10b088",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 115,
"avg_line_length": 39.714285714285715,
"alnum_prop": 0.7841726618705036,
"repo_name": "nmote/flow",
"id": "26289cba33ac66dfbbb1a2ef2d07438dbeb68181",
"size": "479",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scripts/server_callable_extract_metadata.dotslash.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "112238"
},
{
"name": "C++",
"bytes": "5813"
},
{
"name": "Dockerfile",
"bytes": "4611"
},
{
"name": "HTML",
"bytes": "35768"
},
{
"name": "JavaScript",
"bytes": "3020230"
},
{
"name": "Liquid",
"bytes": "17387"
},
{
"name": "Makefile",
"bytes": "27684"
},
{
"name": "OCaml",
"bytes": "8222343"
},
{
"name": "Python",
"bytes": "3830"
},
{
"name": "Ruby",
"bytes": "21863"
},
{
"name": "SCSS",
"bytes": "45501"
},
{
"name": "Shell",
"bytes": "268938"
},
{
"name": "Standard ML",
"bytes": "17465"
}
],
"symlink_target": ""
}
|
"""Contains code for loading and preprocessing the CIFAR-10 data."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tensorflow as tf
from tensorflow.contrib import slim
from tensorflow.contrib.slim import dataset_data_provider
from external import datasets_cifar10
def provide_data(split_name, batch_size, dataset_dir=None):
  """Provides batches of CIFAR data.

  Args:
    split_name: Either 'train' or 'test'.
    batch_size: The number of images in each batch.
    dataset_dir: Directory where the CIFAR-10 TFRecord files live.
      Defaults to "~/tensorflow/data/cifar10"

  Returns:
    images: A `Tensor` of size [batch_size, 32, 32, 3]
    images_not_whiten: A `Tensor` with the same size of `images`, unwhitened
      images.
    one_hot_labels: A `Tensor` of size [batch_size, num_classes], where
      each row has a single element set to one and the rest set to zeros.
    dataset.num_samples: The number of total samples in the dataset.
    dataset.num_classes: The number of object classes in the dataset.

  Raises:
    ValueError: if the split_name is not either 'train' or 'test'.
  """
  with tf.device('/cpu:0'):
    training = split_name == 'train'
    if dataset_dir is None:
      dataset_dir = os.path.expanduser('~/tensorflow/data/cifar10')

    dataset = datasets_cifar10.get_split(split_name, dataset_dir)
    provider = dataset_data_provider.DatasetDataProvider(
        dataset,
        common_queue_capacity=5 * batch_size,
        common_queue_min=batch_size,
        shuffle=training)
    image, label = provider.get(['image', 'label'])
    image = tf.to_float(image)

    side = 32
    if training:
      num_threads = 4
      # Pad by 4 pixels on each side, then take a random 32x32 crop and a
      # random horizontal flip. (Brightness/saturation/contrast jitter gave
      # only .2%~.5% on cifar and is intentionally left out.)
      image = tf.image.resize_image_with_crop_or_pad(image, side + 4,
                                                     side + 4)
      image = tf.random_crop(image, [side, side, 3])
      image = tf.image.random_flip_left_right(image)
    else:
      num_threads = 1
      image = tf.image.resize_image_with_crop_or_pad(image, side, side)

    # Keep a copy before whitening so callers can visualize raw pixels.
    unwhitened = image
    image = tf.image.per_image_standardization(image)

    # Creates a QueueRunner for the pre-fetching operation.
    images, images_not_whiten, labels = tf.train.batch(
        [image, unwhitened, label],
        batch_size=batch_size,
        num_threads=num_threads,
        capacity=5 * batch_size)

    labels = tf.reshape(labels, [-1])
    one_hot_labels = slim.one_hot_encoding(labels, dataset.num_classes)
    return (images, images_not_whiten, one_hot_labels, dataset.num_samples,
            dataset.num_classes)
|
{
"content_hash": "e03645fbcc565c14873dad71bcc984ac",
"timestamp": "",
"source": "github",
"line_count": 85,
"max_line_length": 78,
"avg_line_length": 35.811764705882354,
"alnum_prop": 0.6521024967148489,
"repo_name": "mfigurnov/sact",
"id": "6bcecc77b3ef260faa13435af83179559c3e5e35",
"size": "3722",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cifar_data_provider.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "136526"
}
],
"symlink_target": ""
}
|
import json
import logging
from amqp import Connection as AmqpConnection
from amqp.basic_message import Message as AmqpMessage
from urllib.parse import urlparse
from . import base
log = logging.getLogger("tagia.events")
def _make_rabbitmq_connection(url):
    """Open an AMQP connection from an amqp://user:pass@host/vhost url.

    Credentials default to guest/guest when the user:password part is not
    colon-separated; a url without an '@' is rejected.
    """
    parsed = urlparse(url)

    try:
        authdata, host = parsed.netloc.split("@")
    except Exception as e:
        raise RuntimeError("Invalid url") from e

    try:
        user, password = authdata.split(":")
    except Exception:
        user, password = "guest", "guest"

    # The virtual host is the url path without its leading "/".
    return AmqpConnection(host=host, userid=user,
                          password=password, virtual_host=parsed.path[1:])
class EventsPushBackend(base.BaseEventsPushBackend):
    """Events backend that publishes messages to a RabbitMQ topic exchange."""

    def __init__(self, url):
        self.url = url

    def emit_event(self, message:str, *, routing_key:str, channel:str="events"):
        """Publish *message* on the topic exchange named *channel*.

        Publish errors are logged and swallowed; the connection is always
        closed.
        """
        connection = _make_rabbitmq_connection(self.url)
        try:
            amqp_channel = connection.channel()
            payload = AmqpMessage(message)
            amqp_channel.exchange_declare(exchange=channel, type="topic", auto_delete=True)
            amqp_channel.basic_publish(payload, routing_key=routing_key, exchange=channel)
            amqp_channel.close()
        except Exception:
            log.error("Unhandled exception", exc_info=True)
        finally:
            connection.close()
|
{
"content_hash": "ae09597a7a44f2f1fd93165f15ee500a",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 87,
"avg_line_length": 28.11764705882353,
"alnum_prop": 0.6443514644351465,
"repo_name": "mattcongy/itshop",
"id": "829dcf3a2d16955bcee5d594a926cbdd8ef80394",
"size": "2169",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "docker-images/taigav2/taiga-back/taiga/events/backends/rabbitmq.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "103474"
},
{
"name": "CoffeeScript",
"bytes": "3380"
},
{
"name": "HTML",
"bytes": "274547"
},
{
"name": "JavaScript",
"bytes": "203660"
},
{
"name": "Nginx",
"bytes": "1286"
},
{
"name": "Python",
"bytes": "3591150"
},
{
"name": "Ruby",
"bytes": "164978"
},
{
"name": "Shell",
"bytes": "5238"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
import os
from celery import Celery
from django.conf import settings
# set the default Django settings module for the 'celery' program.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'tenma.settings')
app = Celery('tenma')
# Using a string here means the worker will not have to
# pickle the object when using Windows.
app.config_from_object('django.conf:settings')
app.autodiscover_tasks(lambda: settings.INSTALLED_APPS)
# Diagnostic task: bind=True exposes the task instance as `self`, so the
# incoming task request context can be printed for debugging.
@app.task(bind=True)
def debug_task(self):
    print('Request: {0!r}'.format(self.request))
|
{
"content_hash": "c728f765690b2388b47e543525c4bdde",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 66,
"avg_line_length": 30.944444444444443,
"alnum_prop": 0.7648114901256733,
"repo_name": "hmhrex/Tenma",
"id": "84277d91ec222e3786e0aca923ffec714990e205",
"size": "557",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tenma/celery.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "49765"
},
{
"name": "HTML",
"bytes": "95901"
},
{
"name": "JavaScript",
"bytes": "68756"
},
{
"name": "Python",
"bytes": "91487"
}
],
"symlink_target": ""
}
|
from ..core import db
import datetime
import json
from bson import ObjectId
from ..models import UserModel
class FileModel(db.Document):
    """CoRR backend file model.

    Model that represents the meta-data about a file in CoRR.

    Attributes:
        created_at: A string value of the creation timestamp.
        owner: A reference to the owner of the file.
        checksum: A string digest of the file content.
        encoding: A string value of the file encoding.
        mimetype: A string value of the file type/format.
        size: A Long value of the file size.
        name: A string holding the file name.
        path: A string containing the file origin path.
        storage: A string value of the location.
        possible_location: A list of the file storage type.
        location: A string value of the file current location with undefined as default.
        possible_group: A list of possible CoRR tags to which the file can be associated to.
        group: A string value of the current tag (in terms of utility) with undefined as default.
        description: A string value of the file description.
        extend: A dictionary to add other fields to the file model.
    """
    # BUG FIX: the default was str(datetime.datetime.utcnow()) evaluated once
    # at import time, so every new document shared the module-load timestamp.
    # A callable default is evaluated per document instead.
    created_at = db.StringField(default=lambda: str(datetime.datetime.utcnow()))
    owner = db.ReferenceField(UserModel, reverse_delete_rule=db.CASCADE)
    checksum = db.StringField()
    encoding = db.StringField()
    mimetype = db.StringField()
    size = db.LongField()
    name = db.StringField()
    path = db.StringField()
    storage = db.StringField()
    possible_location = ["local", "remote", "undefined"]
    location = db.StringField(default="undefined", choices=possible_location)
    possible_group = ["file", "bundle", "input", "output", "dependencie", "descriptive", "diff", "attach", "picture" , "logo" , "resource", "undefined"]
    group = db.StringField(default="undefined", choices=possible_group)
    description = db.StringField()
    extend = db.DictField()

    def info(self):
        """Build a dictionary structure of a file model instance content.

        Returns:
            The dictionary content of the file model.
        """
        data = {'created_at': str(self.created_at), 'id': str(self.id),
                'name': self.name, 'encoding': self.encoding,
                'mimetype': self.mimetype, 'size': self.size,
                'storage': self.storage, 'location': self.location}
        if self.owner is not None:
            data['owner'] = str(self.owner.id)
        else:
            data['owner'] = 'public'
        # Backfill for legacy documents created before the checksum field
        # existed: persist an empty checksum so later reads succeed.
        try:
            data["checksum"] = self.checksum
        except Exception:
            self.checksum = ""
            self.save()
            data["checksum"] = self.checksum
        return data

    def extended(self):
        """Add the extend, storage, group, description, owner fields to the built dictionary content.

        Returns:
            The augmented dictionary.
        """
        data = self.info()
        data['storage'] = self.storage
        data['group'] = self.group
        data['description'] = self.description
        data['extend'] = self.extend
        if self.owner is not None:
            data['owner'] = self.owner.info()
        else:
            data['owner'] = 'public'
        return data

    def to_json(self):
        """Transform the extended dictionary into a pretty json.

        Returns:
            The pretty json of the extended dictionary.
        """
        data = self.extended()
        return json.dumps(data, sort_keys=True, indent=4, separators=(',', ': '))

    def summary_json(self):
        """Transform the info dictionary with comments field into a pretty json.

        Returns:
            The pretty json of the info dictionary.
        """
        data = self.info()
        # NOTE(review): self.comments is not declared on this model —
        # presumably provided by related documents or a mixin; verify before
        # relying on this method.
        data['comments'] = len(self.comments)
        return json.dumps(data, sort_keys=True, indent=4, separators=(',', ': '))
|
{
"content_hash": "a9cbe75aea07a5762fa491855965278b",
"timestamp": "",
"source": "github",
"line_count": 98,
"max_line_length": 152,
"avg_line_length": 38.88775510204081,
"alnum_prop": 0.6224088165835738,
"repo_name": "usnistgov/corr",
"id": "144b67b17f018a98735bfd2de94ce8cc989d5f1f",
"size": "3811",
"binary": false,
"copies": "1",
"ref": "refs/heads/v0.2",
"path": "corr-db/corrdb/common/models/file_model.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "117645"
},
{
"name": "Dockerfile",
"bytes": "9902"
},
{
"name": "HTML",
"bytes": "152178"
},
{
"name": "JavaScript",
"bytes": "1391772"
},
{
"name": "Python",
"bytes": "885399"
}
],
"symlink_target": ""
}
|
import re, sys
if sys.version_info[0] == 3:
from .get_setting import get_setting
else:
from get_setting import get_setting
lower_then_upper_pattern = re.compile('([a-z])([A-Z])')
def module_name(variable_name):
    """Map a variable name to its module name.

    A configured known-module mapping wins; otherwise the name is guessed
    from the camelCase convention.
    """
    return known_name(variable_name) or guess_name(variable_name)
def known_name(variable_name):
    """Look up *variable_name* in the user-configured module table.

    Returns the configured module name, or None when the setting is absent
    or the name is not listed.
    """
    known_modules = get_setting('known_modules_by_variable_name')
    return known_modules.get(variable_name) if known_modules else None
def to_dashed(match):
    """Turn a match with groups like ('a', 'B') into the string 'a-b'."""
    lower, upper = match.group(1), match.group(2)
    return '%s-%s' % (lower, upper.lower())
def guess_name(variable_name):
    """Derive a dashed module name from a camelCase variable name."""
    # Lower-case a leading capital so the first letter never gets a dash.
    if variable_name[0].isupper():
        variable_name = variable_name[0].lower() + variable_name[1:]
    return lower_then_upper_pattern.sub(to_dashed, variable_name)
|
{
"content_hash": "0a31dc4e6c5369608b0c8f1a90f5a4eb",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 69,
"avg_line_length": 28.303030303030305,
"alnum_prop": 0.6605995717344754,
"repo_name": "psalaets/QuickRequire",
"id": "a0e45e8a29af92974431f9815da02231849e60d9",
"size": "934",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "module_name.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "993"
},
{
"name": "Python",
"bytes": "5108"
}
],
"symlink_target": ""
}
|
from vt_manager.communication.sfa.rspecs.elements.element import Element
class Position3D(Element):
    """RSpec element carrying the x/y/z coordinates of a position."""
    fields = ['x', 'y', 'z']
|
{
"content_hash": "896f5a16236a4acd9ee2105b063f20b6",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 72,
"avg_line_length": 18.444444444444443,
"alnum_prop": 0.5662650602409639,
"repo_name": "avlach/univbris-ocf",
"id": "d74a9359b989b1a8a53e82d71d6c8846451871e8",
"size": "166",
"binary": false,
"copies": "4",
"ref": "refs/heads/ofelia.opticaldevelopment",
"path": "vt_manager/src/python/vt_manager/communication/sfa/rspecs/elements/position_3d.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "127542"
},
{
"name": "JavaScript",
"bytes": "289680"
},
{
"name": "Perl",
"bytes": "4421"
},
{
"name": "Python",
"bytes": "3446617"
},
{
"name": "Racket",
"bytes": "32770"
},
{
"name": "Shell",
"bytes": "7609"
}
],
"symlink_target": ""
}
|
import time
import random
from collections import OrderedDict
import numpy as np
class TrafficLight(object):
    """A traffic light that flips its open direction every `period` steps."""

    valid_states = [True, False]  # True = NS open, False = EW open

    def __init__(self, state=None, period=None):
        # Randomize any attribute the caller leaves unspecified.
        if state is None:
            state = random.choice(self.valid_states)
        if period is None:
            period = random.choice([3, 4, 5])
        self.state = state
        self.period = period
        self.last_updated = 0

    def reset(self):
        """Restart the switching clock."""
        self.last_updated = 0

    def update(self, t):
        """Toggle the light once `period` steps have elapsed since last switch."""
        if t - self.last_updated >= self.period:
            self.state = not self.state  # assuming state is boolean
            self.last_updated = t
class Environment(object):
    """Environment within which all agents operate.

    Maintains the road grid, traffic lights, agent states (location,
    heading, destination, deadline) and enforces the traffic rules via
    sense()/act().
    """

    valid_actions = [None, 'forward', 'left', 'right']
    valid_inputs = {'light': TrafficLight.valid_states, 'oncoming': valid_actions, 'left': valid_actions, 'right': valid_actions}
    valid_headings = [(1, 0), (0, -1), (-1, 0), (0, 1)]  # ENWS
    hard_time_limit = -100  # even if enforce_deadline is False, end trial when deadline reaches this value (to avoid deadlocks)

    def __init__(self):
        self.agent_successes = []
        self.done = False
        self.t = 0
        self.agent_states = OrderedDict()
        self.status_text = ""

        # Road network
        self.grid_size = (8, 6)  # (cols, rows)
        self.bounds = (1, 1, self.grid_size[0], self.grid_size[1])
        self.block_size = 100
        self.intersections = OrderedDict()
        self.roads = []
        for x in range(self.bounds[0], self.bounds[2] + 1):
            for y in range(self.bounds[1], self.bounds[3] + 1):
                # A traffic light key is its x,y position in the environment
                self.intersections[(x, y)] = TrafficLight()  # a traffic light at each intersection

        # Connect every pair of adjacent intersections with a road.
        for a in self.intersections:
            for b in self.intersections:
                if a == b:
                    continue
                if (abs(a[0] - b[0]) + abs(a[1] - b[1])) == 1:  # L1 distance = 1
                    self.roads.append((a, b))

        for _, tl in self.intersections.items():
            tl.reset()

        # Dummy agents
        self.num_dummies = 3  # no. of dummy agents
        for i in range(self.num_dummies):
            self.create_agent(DummyAgent)

        # Primary agent
        self.primary_agent = None  # to be set explicitly
        self.enforce_deadline = False

    def create_agent(self, agent_class, *args, **kwargs):
        """Instantiate an agent and register it at a random intersection."""
        agent = agent_class(self, *args, **kwargs)
        # BUG FIX: was random.choice(list(self.intersections.items())), which
        # stored a ((x, y), TrafficLight) pair as the location instead of the
        # (x, y) coordinate used everywhere else (reset() uses keys()).
        self.agent_states[agent] = {'location': random.choice(list(self.intersections.keys())), 'heading': (0, 1)}
        return agent

    def set_primary_agent(self, agent, enforce_deadline=False):
        """Mark *agent* as the learner whose deadline/success is tracked."""
        self.primary_agent = agent
        self.enforce_deadline = enforce_deadline

    def reset(self):
        """Start a new trial: new start/destination/deadline for the primary agent."""
        self.done = False
        self.t = 0

        # Reset traffic lights
        for _, traffic_light in self.intersections.items():
            traffic_light.reset()

        # Pick a start and a destination
        start = random.choice(list(self.intersections.keys()))
        destination = random.choice(list(self.intersections.keys()))

        # Ensure starting location and destination are not too close
        while self.compute_dist(start, destination) < 4:
            start = random.choice(list(self.intersections.keys()))
            destination = random.choice(list(self.intersections.keys()))

        start_heading = random.choice(self.valid_headings)
        deadline = self.compute_dist(start, destination) * 5
        print("Environment.reset(): Trial set up with start = {}, destination = {}, deadline = {}".format(start, destination, deadline))

        # Initialize agent(s): dummies get random placements, the primary
        # agent gets the chosen start/destination/deadline.
        for agent in self.agent_states.keys():
            self.agent_states[agent] = {
                'location': start if agent is self.primary_agent else random.choice(list(self.intersections.keys())),
                'heading': start_heading if agent is self.primary_agent else random.choice(self.valid_headings),
                'destination': destination if agent is self.primary_agent else None,
                'deadline': deadline if agent is self.primary_agent else None}
            agent.reset(destination=(destination if agent is self.primary_agent else None))

    def step(self):
        """Advance the simulation one tick: lights, then every agent."""
        # Update traffic lights
        for _, traffic_light in self.intersections.items():
            traffic_light.update(self.t)

        # Update agents
        for agent in self.agent_states.keys():
            agent.update(self.t)

        self.t += 1
        if self.primary_agent is not None:
            agent_deadline = self.agent_states[self.primary_agent]['deadline']
            if agent_deadline <= self.hard_time_limit:
                self.done = True
                print("Environment.step(): Primary agent hit hard time limit ({})! Trial aborted.".format(self.hard_time_limit))
            elif self.enforce_deadline and agent_deadline <= 0:
                self.done = True
                self.agent_successes.append(0)
                print("Environment.step(): Primary agent ran out of time! Trial aborted.")
            self.agent_states[self.primary_agent]['deadline'] = agent_deadline - 1

    def sense(self, agent):
        """Return what *agent* perceives at its intersection.

        The light is green when the open axis matches the agent's heading;
        oncoming/left/right report the intended moves of other agents at the
        same intersection, keyed by where they approach from.
        """
        assert agent in self.agent_states, "Unknown agent!"

        state = self.agent_states[agent]
        location = state['location']
        heading = state['heading']
        light = 'green' if (self.intersections[location].state and heading[1] != 0) or ((not self.intersections[location].state) and heading[0] != 0) else 'red'

        # Populate oncoming, left, right
        oncoming = None
        left = None
        right = None
        for other_agent, other_state in self.agent_states.items():
            if agent == other_agent or location != other_state['location'] or (heading[0] == other_state['heading'][0] and heading[1] == other_state['heading'][1]):
                continue
            other_heading = other_agent.get_next_waypoint()
            if (heading[0] * other_state['heading'][0] + heading[1] * other_state['heading'][1]) == -1:
                if oncoming != 'left':  # we don't want to override oncoming == 'left'
                    oncoming = other_heading
            elif heading[1] == other_state['heading'][0] and -heading[0] == other_state['heading'][1]:
                if right != 'forward' and right != 'left':  # we don't want to override right == 'forward or 'left'
                    right = other_heading
            else:
                if left != 'forward':  # we don't want to override left == 'forward'
                    left = other_heading

        return {'light': light, 'oncoming': oncoming, 'left': left, 'right': right}  # TODO: make this a namedtuple

    def get_deadline(self, agent):
        """Remaining deadline for the primary agent; None for dummies."""
        return self.agent_states[agent]['deadline'] if agent is self.primary_agent else None

    def act(self, agent, action):
        """Apply *action* for *agent*, enforcing traffic rules, and reward it."""
        assert agent in self.agent_states, "Unknown agent!"
        assert action in self.valid_actions, "Invalid action!"

        state = self.agent_states[agent]
        location = state['location']
        heading = state['heading']
        light = 'green' if (self.intersections[location].state and heading[1] != 0) or ((not self.intersections[location].state) and heading[0] != 0) else 'red'
        sense = self.sense(agent)

        # Move agent if within bounds and obeys traffic rules
        reward = 0  # reward/penalty
        move_okay = True
        if action == 'forward':
            if light != 'green':
                move_okay = False
        elif action == 'left':
            if light == 'green' and (sense['oncoming'] is None or sense['oncoming'] == 'left'):
                heading = (heading[1], -heading[0])
            else:
                move_okay = False
        elif action == 'right':
            # BUG FIX: the guard compared against 'straight', which is never a
            # valid action, so right-on-red was always allowed. Right turns are
            # legal on green, or on red when no traffic from the left is going
            # 'forward'.
            if light == 'green' or sense['left'] != 'forward':
                heading = (-heading[1], heading[0])
            else:
                move_okay = False

        if move_okay:
            # Valid move (could be null)
            if action is not None:
                # Valid non-null move
                location = ((location[0] + heading[0] - self.bounds[0]) % (self.bounds[2] - self.bounds[0] + 1) + self.bounds[0],
                            (location[1] + heading[1] - self.bounds[1]) % (self.bounds[3] - self.bounds[1] + 1) + self.bounds[1])  # wrap-around
                #if self.bounds[0] <= location[0] <= self.bounds[2] and self.bounds[1] <= location[1] <= self.bounds[3]:  # bounded
                state['location'] = location
                state['heading'] = heading
                reward = 2.0 if action == agent.get_next_waypoint() else -0.5  # valid, but is it correct? (as per waypoint)
            else:
                # Valid null move
                reward = 0.0
        else:
            # Invalid move
            reward = -1.0

        if agent is self.primary_agent:
            if state['location'] == state['destination']:
                if state['deadline'] >= 0:
                    reward += 10  # bonus
                self.done = True
                self.agent_successes.append(1)
                print("Environment.act(): Primary agent has reached destination!")
            self.status_text = "state: {}\naction: {}\nreward: {}".format(agent.get_state(), action, reward)

        return reward

    def compute_dist(self, a, b):
        """L1 distance between two points."""
        return abs(b[0] - a[0]) + abs(b[1] - a[1])

    def get_num_successes(self):
        """Number of trials the primary agent completed in time."""
        success_array = np.array(self.agent_successes)
        return np.count_nonzero(success_array)
class Agent(object):
    """Base class for all agents: holds the environment handle, the current
    state, the planned waypoint and a display color."""

    def __init__(self, env):
        self.env = env
        self.state = None
        self.next_waypoint = None
        self.color = 'cyan'

    def reset(self, destination=None):
        """Trial-start hook; subclasses override as needed."""
        pass

    def update(self, t):
        """Per-step hook; subclasses override as needed."""
        pass

    def get_state(self):
        """Return the agent's current state representation."""
        return self.state

    def get_next_waypoint(self):
        """Return the move the agent intends to make next."""
        return self.next_waypoint
class DummyAgent(Agent):
    """Random-walk agent that follows the simplified right-of-way rules."""

    color_choices = ['blue', 'cyan', 'magenta', 'orange']

    def __init__(self, env):
        # Base class sets env, state, next_waypoint and a default color.
        super(DummyAgent, self).__init__(env)
        self.next_waypoint = random.choice(Environment.valid_actions[1:])
        self.color = random.choice(self.color_choices)

    def update(self, t):
        inputs = self.env.sense(self)

        # Is the planned waypoint legal under current traffic conditions?
        okay = True
        if self.next_waypoint == 'right':
            # Right on red is blocked by cross traffic from the left going straight.
            if inputs['light'] == 'red' and inputs['left'] == 'forward':
                okay = False
        elif self.next_waypoint == 'forward':
            if inputs['light'] == 'red':
                okay = False
        elif self.next_waypoint == 'left':
            if inputs['light'] == 'red' or inputs['oncoming'] in ('forward', 'right'):
                okay = False

        action = self.next_waypoint if okay else None
        if okay:
            # Executed the planned move; pick the next one at random.
            self.next_waypoint = random.choice(Environment.valid_actions[1:])
        reward = self.env.act(self, action)
|
{
"content_hash": "c5d12ddcd40b666827c6d743aeb16533",
"timestamp": "",
"source": "github",
"line_count": 278,
"max_line_length": 164,
"avg_line_length": 40.992805755395686,
"alnum_prop": 0.573008073008073,
"repo_name": "drpjm/udacity-mle-project4",
"id": "233fb6c85950bd69275dce69441185656caa4526",
"size": "11396",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "smartcab/environment.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "29166"
},
{
"name": "TeX",
"bytes": "10579"
}
],
"symlink_target": ""
}
|
from builtins import str
from builtins import object
import re
import csv
import unicodecsv
from openelex.base.load import BaseLoader
from openelex.models import RawResult
from openelex.lib.text import ocd_type_id, slugify
from .datasource import Datasource
"""
West Virginia elections have CSV results files for elections after 2006. These files contain precinct-level data for each of the state's
counties, and includes all contests in that county. Prior to 2008, county-level results are contained in office-specific PDF files. The CSV versions of those are contained in the
https://github.com/openelections/openelections-data-wv repository.
"""
class LoadResults(object):
    """Entry point for data loading.

    Determines the appropriate loader for a results file and triggers the
    load process.
    """

    # Election years whose results only exist as converted PDF/CSV files.
    _PRE_2008_YEARS = ('2000', '2002', '2004', '2006')

    def run(self, mapping):
        """Dispatch *mapping* to the loader matching its election year."""
        election_id = mapping['election']
        is_pre_2008 = any(year in election_id for year in self._PRE_2008_YEARS)
        loader = WVLoaderPre2008() if is_pre_2008 else WVLoader()
        loader.run(mapping)
class WVBaseLoader(BaseLoader):
    """Shared datasource and office filters for the West Virginia loaders."""

    datasource = Datasource()

    # Only rows for these contests are loaded; everything else is skipped.
    target_offices = {
        'U.S. President',
        'U.S. Senate',
        'U.S. House of Representatives',
        'Governor',
        'Secretary of State',
        'Auditor',
        'State Treasurer',
        'Commissioner of Agriculture',
        'Attorney General',
        'State Senate',
        'House of Delegates',
    }

    # Contests decided per-district rather than statewide.
    district_offices = {
        'U.S. House of Representatives',
        'State Senate',
        'House of Delegates',
    }

    def _skip_row(self, row):
        """Return True when *row* should be skipped.

        The base implementation keeps every row; subclasses override this.
        """
        return False
class WVLoader(WVBaseLoader):
    """
    Parse West Virginia election results for all elections after 2006.

    Dispatches each CSV row to a county- or precinct-level RawResult builder
    depending on the filename and the row's county.
    """

    def load(self):
        """Read the mapped CSV file and bulk-insert RawResult documents."""
        with self._file_handle as csvfile:
            results = []
            reader = unicodecsv.DictReader(csvfile)
            for row in reader:
                # Skip non-target offices
                if self._skip_row(row):
                    continue
                elif any(s in self.mapping['generated_filename'] for s in ['2008', '2010', '2011']):
                    # 2008-2011 files mix row types; keep only county rows.
                    if row['Type'] == 'County':
                        results.append(self._prep_county_result(row))
                    else:
                        continue
                elif '__precinct__' not in self.mapping['generated_filename']:
                    # County-level file: skip summary rows with no county name.
                    if row['CountyName'] == '':
                        continue
                    results.append(self._prep_county_result(row))
                elif any(county == row['CountyName'] for county in ['Kanawha', 'Marshall', 'Nicholas', 'Cabell']):
                    # These counties' precinct files use the lowercase-header
                    # layout from the openelections-data-wv GitHub repo.
                    results.append(self._prep_github_precinct_result(row))
                else:
                    results.append(self._prep_precinct_result(row))
            RawResult.objects.insert(results)

    def _skip_row(self, row):
        # Unofficial results are skipped entirely.
        if row['OfficialResults'] and row['OfficialResults'] == 'No':
            return True
        return row['OfficeDescription'].strip() not in self.target_offices

    def _build_contest_kwargs(self, row, primary_type):
        # NOTE(review): primary_type is accepted but unused in this subclass.
        kwargs = {
            'office': row['OfficeDescription'].strip(),
            'district': row['District'].strip(),
            'primary_party': row['PartyName'].strip()
        }
        return kwargs

    def _build_candidate_kwargs(self, row):
        full_name = row['Name'].strip()
        slug = slugify(full_name, substitute='-')
        kwargs = {
            'full_name': full_name,
            # TODO: QUESTION: Do we need this? if so, needs a matching model field on RawResult
            'name_slug': slug,
        }
        return kwargs

    def _base_kwargs(self, row):
        """Build the base set of kwargs shared by every RawResult for *row*."""
        # TODO: Can this just be called once?
        kwargs = self._build_common_election_kwargs()
        contest_kwargs = self._build_contest_kwargs(row, kwargs['primary_type'])
        candidate_kwargs = self._build_candidate_kwargs(row)
        kwargs.update(contest_kwargs)
        kwargs.update(candidate_kwargs)
        return kwargs

    def _prep_precinct_result(self, row):
        """Build a precinct-level RawResult from a standard-layout row."""
        kwargs = self._base_kwargs(row)
        precinct = str(row['Precinct'])
        # Look up the county's OCD id by case-insensitive name match; assumes
        # every CountyName appears in the datasource jurisdictions list.
        county_ocd_id = [c for c in self.datasource._jurisdictions() if c['county'].upper() == row['CountyName'].upper()][0]['ocd_id']
        kwargs.update({
            'reporting_level': 'precinct',
            'jurisdiction': precinct,
            'parent_jurisdiction': row['CountyName'],
            'ocd_id': "{}/precinct:{}".format(county_ocd_id, ocd_type_id(precinct)),
            'party': row['PartyName'].strip(),
            'votes': self._votes(row['Votes']),
            'vote_breakdowns': {},
        })
        return RawResult(**kwargs)

    def _prep_github_precinct_result(self, row):
        """Build a precinct-level RawResult from a lowercase-header (GitHub) row."""
        kwargs = self._base_kwargs(row)
        precinct = str(row['precinct'])
        county_ocd_id = [c for c in self.datasource._jurisdictions() if c['county'].upper() == row['county'].upper()][0]['ocd_id']
        kwargs.update({
            'reporting_level': 'precinct',
            'jurisdiction': precinct,
            'parent_jurisdiction': row['county'],
            'ocd_id': "{}/precinct:{}".format(county_ocd_id, ocd_type_id(precinct)),
            'party': row['party'].strip(),
            'votes': self._votes(row['votes']),
            'vote_breakdowns': {},
        })
        return RawResult(**kwargs)

    def _prep_county_result(self, row):
        """Build a county-level RawResult."""
        kwargs = self._base_kwargs(row)
        county_ocd_id = [c for c in self.datasource._jurisdictions() if c['county'].upper() == row['CountyName'].upper()][0]['ocd_id']
        kwargs.update({
            'reporting_level': 'county',
            'jurisdiction': row['CountyName'],
            'ocd_id': county_ocd_id,
            'party': row['PartyName'].strip(),
            'votes': self._votes(row['Votes']),
            'vote_breakdowns': {},
        })
        return RawResult(**kwargs)

    def _votes(self, val):
        """
        Returns cleaned version of votes or 0 if it's a non-numeric value.
        """
        if val.strip() == '':
            return 0
        try:
            return int(float(val))
        except ValueError:
            # Can't convert value from string
            return 0

    def _writein(self, row):
        # sometimes write-in field not present
        try:
            write_in = row['Write-In?'].strip()
        except KeyError:
            write_in = None
        return write_in
class WVLoaderPre2008(WVBaseLoader):
    """
    Loads West Virginia results for 2000-2006.

    Format:

    West Virginia has PDF files that have been converted to CSV files with office names that correspond
    to those used for elections after 2006. Header rows are identical except for statewide offices that
    do not contain districts.
    """

    def load(self):
        """Read the headerless CSV and bulk-insert county-level RawResults."""
        # The converted CSVs carry no header row, so field names are fixed here.
        headers = [
            'year',
            'election',
            'office',
            'party',
            'district',
            'candidate',
            'county',
            'votes',
            'winner'
        ]
        self._common_kwargs = self._build_common_election_kwargs()
        self._common_kwargs['reporting_level'] = 'county'
        # Store result instances for bulk loading
        results = []

        with self._file_handle as csvfile:
            reader = unicodecsv.DictReader(csvfile, fieldnames=headers)
            for row in reader:
                if self._skip_row(row):
                    continue
                if row['county'].strip() == 'Totals':
                    # Contest-wide totals row: remember its values for the
                    # county rows that follow. NOTE(review): assumes a 'Totals'
                    # row always precedes a contest's county rows; otherwise the
                    # total_votes/contest_winner references below raise
                    # NameError -- confirm against the source files.
                    total_votes = int(row['votes'].strip())
                    contest_winner = row['winner'].strip()
                else:
                    rr_kwargs = self._common_kwargs.copy()
                    rr_kwargs['primary_party'] = row['party'].strip()
                    rr_kwargs.update(self._build_contest_kwargs(row))
                    rr_kwargs.update(self._build_candidate_kwargs(row))
                    jurisdiction = row['county'].strip()
                    rr_kwargs.update({
                        'party': row['party'].strip(),
                        'jurisdiction': jurisdiction,
                        'ocd_id': "{}/county:{}".format(self.mapping['ocd_id'],
                                                        ocd_type_id(jurisdiction)),
                        'office': row['office'].strip(),
                        'district': row['district'].strip(),
                        'votes': int(row['votes'].strip()),
                        'winner': row['winner'].strip(),
                        'total_votes': total_votes,
                        'contest_winner': contest_winner
                    })
                    results.append(RawResult(**rr_kwargs))
            RawResult.objects.insert(results)

    def _skip_row(self, row):
        return row['office'].strip() not in self.target_offices

    def _build_contest_kwargs(self, row):
        return {
            'office': row['office'].strip(),
            'district': row['district'].strip(),
        }

    def _build_candidate_kwargs(self, row):
        return {
            'full_name': row['candidate'].strip()
        }
|
{
"content_hash": "bb192d98867b42af39744fa5ed51feeb",
"timestamp": "",
"source": "github",
"line_count": 261,
"max_line_length": 178,
"avg_line_length": 35.75478927203065,
"alnum_prop": 0.5448992713244749,
"repo_name": "openelections/openelections-core",
"id": "c753531bdd4743c1ce74a3f240fd615030210224",
"size": "9332",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "openelex/us/wv/load.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "57395"
},
{
"name": "Python",
"bytes": "949426"
}
],
"symlink_target": ""
}
|
from test_framework.test_framework import VCoinTestFramework
from test_framework.util import *
class WalletTest (VCoinTestFramework):
    """Exercise basic wallet RPC behaviour across a small node network.

    Covers balance accounting, raw-transaction workflows, fee range checks,
    -walletbroadcast handling, string/scientific-notation amounts and
    wallet/blockchain maintenance flags. NOTE: Python 2 syntax (print
    statements and ``except X, e``) -- the whole file targets Python 2.
    """

    def check_fee_amount(self, curr_balance, balance_with_fee, fee_per_byte, tx_size):
        """Return curr_balance after asserting the fee was in range"""
        fee = balance_with_fee - curr_balance
        target_fee = fee_per_byte * tx_size
        if fee < target_fee:
            raise AssertionError("Fee of %s BTC too low! (Should be %s BTC)"%(str(fee), str(target_fee)))
        # allow the node's estimation to be at most 2 bytes off
        if fee > fee_per_byte * (tx_size + 2):
            raise AssertionError("Fee of %s BTC too high! (Should be %s BTC)"%(str(fee), str(target_fee)))
        return curr_balance

    def setup_chain(self):
        # A clean 4-node chain dir; the 4th node is only started mid-test.
        print("Initializing test directory "+self.options.tmpdir)
        initialize_chain_clean(self.options.tmpdir, 4)

    def setup_network(self, split=False):
        # Start 3 nodes connected in a fully-meshed triangle.
        self.nodes = start_nodes(3, self.options.tmpdir)
        connect_nodes_bi(self.nodes,0,1)
        connect_nodes_bi(self.nodes,1,2)
        connect_nodes_bi(self.nodes,0,2)
        self.is_network_split=False
        self.sync_all()

    def run_test (self):
        """Main test body; each commented section is one scenario."""
        print "Mining blocks..."

        self.nodes[0].generate(1)

        walletinfo = self.nodes[0].getwalletinfo()
        assert_equal(walletinfo['immature_balance'], 50)
        assert_equal(walletinfo['balance'], 0)

        self.sync_all()
        self.nodes[1].generate(101)
        self.sync_all()

        # Node 0's coinbase has matured after 100 further blocks.
        assert_equal(self.nodes[0].getbalance(), 50)
        assert_equal(self.nodes[1].getbalance(), 50)
        assert_equal(self.nodes[2].getbalance(), 0)

        # Send 21 BTC from 0 to 2 using sendtoaddress call.
        # Second transaction will be child of first, and will require a fee
        self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 11)
        self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 10)

        walletinfo = self.nodes[0].getwalletinfo()
        assert_equal(walletinfo['immature_balance'], 0)

        # Have node0 mine a block, thus it will collect its own fee.
        self.nodes[0].generate(1)
        self.sync_all()

        # Have node1 generate 100 blocks (so node0 can recover the fee)
        self.nodes[1].generate(100)
        self.sync_all()

        # node0 should end up with 100 btc in block rewards plus fees, but
        # minus the 21 plus fees sent to node2
        assert_equal(self.nodes[0].getbalance(), 100-21)
        assert_equal(self.nodes[2].getbalance(), 21)

        # Node0 should have two unspent outputs.
        # Create a couple of transactions to send them to node2, submit them through
        # node1, and make sure both node0 and node2 pick them up properly:
        node0utxos = self.nodes[0].listunspent(1)
        assert_equal(len(node0utxos), 2)

        # create both transactions
        txns_to_send = []
        for utxo in node0utxos:
            inputs = []
            outputs = {}
            inputs.append({ "txid" : utxo["txid"], "vout" : utxo["vout"]})
            outputs[self.nodes[2].getnewaddress("from1")] = utxo["amount"]
            raw_tx = self.nodes[0].createrawtransaction(inputs, outputs)
            txns_to_send.append(self.nodes[0].signrawtransaction(raw_tx))

        # Have node 1 (miner) send the transactions
        self.nodes[1].sendrawtransaction(txns_to_send[0]["hex"], True)
        self.nodes[1].sendrawtransaction(txns_to_send[1]["hex"], True)

        # Have node1 mine a block to confirm transactions:
        self.nodes[1].generate(1)
        self.sync_all()

        assert_equal(self.nodes[0].getbalance(), 0)
        assert_equal(self.nodes[2].getbalance(), 100)
        assert_equal(self.nodes[2].getbalance("from1"), 100-21)

        # Send 10 BTC normal
        address = self.nodes[0].getnewaddress("test")
        fee_per_byte = Decimal('0.001') / 1000
        self.nodes[2].settxfee(fee_per_byte * 1000)
        txid = self.nodes[2].sendtoaddress(address, 10, "", "", False)
        self.nodes[2].generate(1)
        self.sync_all()
        node_2_bal = self.check_fee_amount(self.nodes[2].getbalance(), Decimal('90'), fee_per_byte, count_bytes(self.nodes[2].getrawtransaction(txid)))
        assert_equal(self.nodes[0].getbalance(), Decimal('10'))

        # Send 10 BTC with subtract fee from amount
        txid = self.nodes[2].sendtoaddress(address, 10, "", "", True)
        self.nodes[2].generate(1)
        self.sync_all()
        node_2_bal -= Decimal('10')
        assert_equal(self.nodes[2].getbalance(), node_2_bal)
        node_0_bal = self.check_fee_amount(self.nodes[0].getbalance(), Decimal('20'), fee_per_byte, count_bytes(self.nodes[2].getrawtransaction(txid)))

        # Sendmany 10 BTC
        txid = self.nodes[2].sendmany('from1', {address: 10}, 0, "", [])
        self.nodes[2].generate(1)
        self.sync_all()
        node_0_bal += Decimal('10')
        node_2_bal = self.check_fee_amount(self.nodes[2].getbalance(), node_2_bal - Decimal('10'), fee_per_byte, count_bytes(self.nodes[2].getrawtransaction(txid)))
        assert_equal(self.nodes[0].getbalance(), node_0_bal)

        # Sendmany 10 BTC with subtract fee from amount
        txid = self.nodes[2].sendmany('from1', {address: 10}, 0, "", [address])
        self.nodes[2].generate(1)
        self.sync_all()
        node_2_bal -= Decimal('10')
        assert_equal(self.nodes[2].getbalance(), node_2_bal)
        node_0_bal = self.check_fee_amount(self.nodes[0].getbalance(), node_0_bal + Decimal('10'), fee_per_byte, count_bytes(self.nodes[2].getrawtransaction(txid)))

        # Test ResendWalletTransactions:
        # Create a couple of transactions, then start up a fourth
        # node (nodes[3]) and ask nodes[0] to rebroadcast.
        # EXPECT: nodes[3] should have those transactions in its mempool.
        txid1 = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 1)
        txid2 = self.nodes[1].sendtoaddress(self.nodes[0].getnewaddress(), 1)
        sync_mempools(self.nodes)

        self.nodes.append(start_node(3, self.options.tmpdir))
        connect_nodes_bi(self.nodes, 0, 3)
        sync_blocks(self.nodes)

        relayed = self.nodes[0].resendwallettransactions()
        assert_equal(set(relayed), {txid1, txid2})
        sync_mempools(self.nodes)

        assert(txid1 in self.nodes[3].getrawmempool())

        #check if we can list zero value tx as available coins
        #1. create rawtx
        #2. hex-changed one output to 0.0
        #3. sign and send
        #4. check if recipient (node0) can list the zero value tx
        usp = self.nodes[1].listunspent()
        inputs = [{"txid":usp[0]['txid'], "vout":usp[0]['vout']}]
        outputs = {self.nodes[1].getnewaddress(): 49.998, self.nodes[0].getnewaddress(): 11.11}

        # Patch the serialized amount bytes directly in the hex dump.
        rawTx = self.nodes[1].createrawtransaction(inputs, outputs).replace("c0833842", "00000000") #replace 11.11 with 0.0 (int32)
        decRawTx = self.nodes[1].decoderawtransaction(rawTx)
        signedRawTx = self.nodes[1].signrawtransaction(rawTx)
        decRawTx = self.nodes[1].decoderawtransaction(signedRawTx['hex'])
        zeroValueTxid= decRawTx['txid']
        sendResp = self.nodes[1].sendrawtransaction(signedRawTx['hex'])

        self.sync_all()
        self.nodes[1].generate(1) #mine a block
        self.sync_all()

        unspentTxs = self.nodes[0].listunspent() #zero value tx must be in listunspents output
        found = False
        for uTx in unspentTxs:
            if uTx['txid'] == zeroValueTxid:
                found = True
                assert_equal(uTx['amount'], Decimal('0'))
        assert(found)

        #do some -walletbroadcast tests
        stop_nodes(self.nodes)
        wait_vcoinds()
        self.nodes = start_nodes(3, self.options.tmpdir, [["-walletbroadcast=0"],["-walletbroadcast=0"],["-walletbroadcast=0"]])
        connect_nodes_bi(self.nodes,0,1)
        connect_nodes_bi(self.nodes,1,2)
        connect_nodes_bi(self.nodes,0,2)
        self.sync_all()

        txIdNotBroadcasted = self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 2)
        txObjNotBroadcasted = self.nodes[0].gettransaction(txIdNotBroadcasted)
        self.nodes[1].generate(1) #mine a block, tx should not be in there
        self.sync_all()
        assert_equal(self.nodes[2].getbalance(), node_2_bal) #should not be changed because tx was not broadcasted

        #now broadcast from another node, mine a block, sync, and check the balance
        self.nodes[1].sendrawtransaction(txObjNotBroadcasted['hex'])
        self.nodes[1].generate(1)
        self.sync_all()
        node_2_bal += 2
        txObjNotBroadcasted = self.nodes[0].gettransaction(txIdNotBroadcasted)
        assert_equal(self.nodes[2].getbalance(), node_2_bal)

        #create another tx
        txIdNotBroadcasted = self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 2)

        #restart the nodes with -walletbroadcast=1
        stop_nodes(self.nodes)
        wait_vcoinds()
        self.nodes = start_nodes(3, self.options.tmpdir)
        connect_nodes_bi(self.nodes,0,1)
        connect_nodes_bi(self.nodes,1,2)
        connect_nodes_bi(self.nodes,0,2)
        sync_blocks(self.nodes)

        self.nodes[0].generate(1)
        sync_blocks(self.nodes)
        node_2_bal += 2

        #tx should be added to balance because after restarting the nodes tx should be broadcastet
        assert_equal(self.nodes[2].getbalance(), node_2_bal)

        #send a tx with value in a string (PR#6380 +)
        txId = self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), "2")
        txObj = self.nodes[0].gettransaction(txId)
        assert_equal(txObj['amount'], Decimal('-2'))

        txId = self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), "0.0001")
        txObj = self.nodes[0].gettransaction(txId)
        assert_equal(txObj['amount'], Decimal('-0.0001'))

        #check if JSON parser can handle scientific notation in strings
        txId = self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), "1e-4")
        txObj = self.nodes[0].gettransaction(txId)
        assert_equal(txObj['amount'], Decimal('-0.0001'))

        #this should fail
        errorString = ""
        try:
            txId = self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), "1f-4")
        except JSONRPCException,e:
            errorString = e.error['message']

        assert_equal("Invalid amount" in errorString, True)

        errorString = ""
        try:
            self.nodes[0].generate("2") #use a string to as block amount parameter must fail because it's not interpreted as amount
        except JSONRPCException,e:
            errorString = e.error['message']

        assert_equal("not an integer" in errorString, True)

        #check if wallet or blochchain maintenance changes the balance
        self.sync_all()
        self.nodes[0].generate(1)
        self.sync_all()
        balance_nodes = [self.nodes[i].getbalance() for i in range(3)]

        maintenance = [
            '-rescan',
            '-reindex',
            '-zapwallettxes=1',
            '-zapwallettxes=2',
            '-salvagewallet',
        ]
        for m in maintenance:
            # Restart all three nodes with the maintenance flag; balances must
            # be unchanged afterwards.
            stop_nodes(self.nodes)
            wait_vcoinds()
            self.nodes = start_nodes(3, self.options.tmpdir, [[m]] * 3)
            connect_nodes_bi(self.nodes,0,1)
            connect_nodes_bi(self.nodes,1,2)
            connect_nodes_bi(self.nodes,0,2)
            self.sync_all()
            assert_equal(balance_nodes, [self.nodes[i].getbalance() for i in range(3)])
# Run the wallet test suite when invoked directly.
if __name__ == '__main__':
    WalletTest().main()
|
{
"content_hash": "38c30ca39f4cf1ec809943972064e47a",
"timestamp": "",
"source": "github",
"line_count": 273,
"max_line_length": 164,
"avg_line_length": 42.743589743589745,
"alnum_prop": 0.6174479389836318,
"repo_name": "vcoin-project/v",
"id": "c3df9735c10f08fc039583bce2ccc6ab4b34a600",
"size": "11883",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "qa/rpc-tests/wallet.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "708021"
},
{
"name": "C++",
"bytes": "4107591"
},
{
"name": "CSS",
"bytes": "1127"
},
{
"name": "Groff",
"bytes": "543539"
},
{
"name": "HTML",
"bytes": "50621"
},
{
"name": "M4",
"bytes": "146937"
},
{
"name": "Makefile",
"bytes": "1093629"
},
{
"name": "NSIS",
"bytes": "6503"
},
{
"name": "Objective-C",
"bytes": "2156"
},
{
"name": "Objective-C++",
"bytes": "7232"
},
{
"name": "Protocol Buffer",
"bytes": "2300"
},
{
"name": "Python",
"bytes": "613863"
},
{
"name": "Shell",
"bytes": "1762241"
}
],
"symlink_target": ""
}
|
from django.conf import settings
from django.contrib.sites.models import RequestSite
from django.contrib.sites.models import Site
from django.contrib.auth import get_user_model
from registration import signals
from registration.models import RegistrationProfile
from registration.views import ActivationView as BaseActivationView
from registration.views import RegistrationView as BaseRegistrationView
class RegistrationView(BaseRegistrationView):
    """
    A registration backend which follows a simple workflow:

    1. User signs up, inactive account is created.

    2. Email is sent to user with activation link.

    3. User clicks activation link, account is now active.

    Using this backend requires that

    * ``registration`` be listed in the ``INSTALLED_APPS`` setting
      (since this backend makes use of models defined in this
      application).

    * The setting ``ACCOUNT_ACTIVATION_DAYS`` be supplied, specifying
      (as an integer) the number of days from registration during
      which a user may activate their account (after that period
      expires, activation will be disallowed).

    * The creation of the templates
      ``registration/activation_email_subject.txt`` and
      ``registration/activation_email.txt``, which will be used for
      the activation email. See the notes for this backends
      ``register`` method for details regarding these templates.

    Additionally, registration can be temporarily closed by adding the
    setting ``REGISTRATION_OPEN`` and setting it to
    ``False``. Omitting this setting, or setting it to ``True``, will
    be interpreted as meaning that registration is currently open and
    permitted.

    Internally, this is accomplished via storing an activation key in
    an instance of ``registration.models.RegistrationProfile``. See
    that model and its custom manager for full documentation of its
    fields and supported operations.
    """

    def register(self, request, **cleaned_data):
        """
        Given a username, email address and password, register a new
        user account, which will initially be inactive.

        Along with the new ``User`` object, a new
        ``registration.models.RegistrationProfile`` will be created,
        tied to that ``User``, containing the activation key which
        will be used for this account.

        An email will be sent to the supplied email address; this
        email should contain an activation link. The email will be
        rendered using two templates. See the documentation for
        ``RegistrationProfile.send_activation_email()`` for
        information about these templates and the contexts provided to
        them.

        After the ``User`` and ``RegistrationProfile`` are created and
        the activation email is sent, the signal
        ``registration.signals.user_registered`` will be sent, with
        the new ``User`` as the keyword argument ``user`` and the
        class of this backend as the sender.
        """
        username = cleaned_data[get_user_model().USERNAME_FIELD]
        password = cleaned_data['password1']

        # Forward any other concrete user-model fields present in the cleaned
        # form data, so custom user models get their extra fields populated.
        other_fields = {}
        field_name = set([field.name for field in get_user_model()._meta.fields]) - set(['id', get_user_model().USERNAME_FIELD])
        for key in cleaned_data:
            if key in field_name:
                other_fields[key] = cleaned_data[key]

        # NOTE(review): Site._meta.installed was removed in newer Django
        # versions -- confirm the targeted Django release still supports it.
        if Site._meta.installed:
            site = Site.objects.get_current()
        else:
            site = RequestSite(request)
        new_user = RegistrationProfile.objects.create_inactive_user(username,
                                                                    password,
                                                                    site,
                                                                    **other_fields
                                                                    )
        signals.user_registered.send(sender=self.__class__,
                                     user=new_user,
                                     request=request)
        return new_user

    def registration_allowed(self, request):
        """
        Indicate whether account registration is currently permitted,
        based on the value of the setting ``REGISTRATION_OPEN``. This
        is determined as follows:

        * If ``REGISTRATION_OPEN`` is not specified in settings, or is
          set to ``True``, registration is permitted.

        * If ``REGISTRATION_OPEN`` is both specified and set to
          ``False``, registration is not permitted.
        """
        return getattr(settings, 'REGISTRATION_OPEN', True)

    def get_success_url(self, request, user):
        """
        Return the name of the URL to redirect to after successful
        user registration.
        """
        return ('registration_complete', (), {})
class ActivationView(BaseActivationView):
    """Activation step of the default backend, backed by RegistrationProfile."""

    def activate(self, request, activation_key):
        """
        Given an an activation key, look up and activate the user
        account corresponding to that key (if possible).

        After successful activation, the signal
        ``registration.signals.user_activated`` will be sent, with the
        newly activated ``User`` as the keyword argument ``user`` and
        the class of this backend as the sender.
        """
        user = RegistrationProfile.objects.activate_user(activation_key)
        if not user:
            # Invalid or already-consumed key: hand the falsy value back to
            # the base view, which renders the failure page.
            return user
        signals.user_activated.send(
            sender=self.__class__,
            user=user,
            request=request,
        )
        return user

    def get_success_url(self, request, user):
        """Redirect target shown after a successful activation."""
        return ('registration_activation_complete', (), {})
|
{
"content_hash": "e8ad7bdc92b2b9ccb4cfb54165893475",
"timestamp": "",
"source": "github",
"line_count": 142,
"max_line_length": 128,
"avg_line_length": 40.83802816901409,
"alnum_prop": 0.6282117606483877,
"repo_name": "raulgarreta/django-registration",
"id": "d0eff2abd7b892018dc9ec2d42b0da119b886e16",
"size": "5799",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "registration/backends/default/views.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "76874"
},
{
"name": "Shell",
"bytes": "2985"
}
],
"symlink_target": ""
}
|
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

# Plugin documentation consumed by ansible-doc; typos fixed ("plugin allows",
# "its own") and become_exe described as the pfexec executable rather than
# a copy-pasted "Sudo executable".
DOCUMENTATION = """
    become: pfexec
    short_description: profile based execution
    description:
        - This become plugin allows your remote/login user to execute commands as another user via the pfexec utility.
    author: ansible (@core)
    version_added: "2.8"
    options:
        become_user:
            description:
                - User you 'become' to execute the task
                - This plugin ignores this setting as pfexec uses its own ``exec_attr`` to figure this out,
                  but it is supplied here for Ansible to make decisions needed for the task execution, like file permissions.
            default: root
            ini:
              - section: privilege_escalation
                key: become_user
              - section: pfexec_become_plugin
                key: user
            vars:
              - name: ansible_become_user
              - name: ansible_pfexec_user
            env:
              - name: ANSIBLE_BECOME_USER
              - name: ANSIBLE_PFEXEC_USER
        become_exe:
            description: pfexec executable
            default: pfexec
            ini:
              - section: privilege_escalation
                key: become_exe
              - section: pfexec_become_plugin
                key: executable
            vars:
              - name: ansible_become_exe
              - name: ansible_pfexec_exe
            env:
              - name: ANSIBLE_BECOME_EXE
              - name: ANSIBLE_PFEXEC_EXE
        become_flags:
            description: Options to pass to pfexec
            default: -H -S -n
            ini:
              - section: privilege_escalation
                key: become_flags
              - section: pfexec_become_plugin
                key: flags
            vars:
              - name: ansible_become_flags
              - name: ansible_pfexec_flags
            env:
              - name: ANSIBLE_BECOME_FLAGS
              - name: ANSIBLE_PFEXEC_FLAGS
        become_pass:
            description: pfexec password
            required: False
            vars:
              - name: ansible_become_password
              - name: ansible_become_pass
              - name: ansible_pfexec_pass
            env:
              - name: ANSIBLE_BECOME_PASS
              - name: ANSIBLE_PFEXEC_PASS
            ini:
              - section: pfexec_become_plugin
                key: password
        wrap_exe:
            description: Toggle to wrap the command pfexec calls in 'shell -c' or not
            default: False
            type: bool
            ini:
              - section: pfexec_become_plugin
                key: wrap_execution
            vars:
              - name: ansible_pfexec_wrap_execution
            env:
              - name: ANSIBLE_PFEXEC_WRAP_EXECUTION
    note:
      - This plugin ignores ``become_user`` as pfexec uses its own ``exec_attr`` to figure this out.
"""
from ansible.plugins.become import BecomeBase
class BecomeModule(BecomeBase):
    """Privilege escalation via the Solaris ``pfexec`` utility."""

    name = 'pfexec'

    def build_become_command(self, cmd, shell):
        """Wrap *cmd* in a pfexec invocation; empty commands pass through."""
        super(BecomeModule, self).build_become_command(cmd, shell)

        if not cmd:
            return cmd

        become_exe = self.get_option('become_exe') or self.name
        become_flags = self.get_option('become_flags')
        # wrap_exe toggles whether the command is wrapped in 'shell -c'.
        success_cmd = self._build_success_command(
            cmd, shell, noexe=not self.get_option('wrap_exe'))
        return '%s %s "%s"' % (become_exe, become_flags, success_cmd)
|
{
"content_hash": "9b9777e8c145349038329687e656143e",
"timestamp": "",
"source": "github",
"line_count": 101,
"max_line_length": 125,
"avg_line_length": 34.71287128712871,
"alnum_prop": 0.5362236166571591,
"repo_name": "thaim/ansible",
"id": "cef870a6d61860cb6deb6d1bb7fa4fcbd554bc28",
"size": "3662",
"binary": false,
"copies": "37",
"ref": "refs/heads/fix-broken-link",
"path": "lib/ansible/plugins/become/pfexec.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "7"
},
{
"name": "Shell",
"bytes": "246"
}
],
"symlink_target": ""
}
|
import copy
import inspect
import logging
from functools import partialmethod
from gen.exceptions import ValidationError
from pkgpanda.util import json_prettyprint
log = logging.getLogger(__name__)
def get_function_parameters(function):
    """Return the set of parameter names accepted by *function*."""
    signature = inspect.signature(function)
    return {parameter_name for parameter_name in signature.parameters}
def validate_arguments_strings(arguments: dict):
    """Validate that every key and value of *arguments* is a string.

    All problems are collected before raising, so one call reports every
    offending entry at once.

    Raises:
        ValidationError: if any key or value is not a string. Non-string keys
            are reported under the empty-string key; non-string values under
            their argument name.
    """
    errors = dict()
    # Validate that all keys and values of arguments are strings
    for k, v in arguments.items():
        if not isinstance(k, str):
            errors[''] = "All keys in arguments must be strings. '{}' isn't.".format(k)
        if not isinstance(v, str):
            errors[k] = ("All values in arguments must be strings. Value for argument {} isn't. " +
                         "Given value: {}").format(k, v)

    # Idiomatic truthiness check instead of len().
    if errors:
        raise ValidationError(errors, set())
class Setter:
    """One way of computing a single configuration argument.

    ``value`` may be either a literal string or a callable; callables are
    called with the arguments named by their own parameters.
    """

    # NOTE: value may either be a function or a string.
    def __init__(self, name, value, is_optional, conditions, is_user):
        assert isinstance(conditions, list)
        self.name = name
        self.is_optional = is_optional
        self.conditions = conditions
        self.is_user = is_user

        if isinstance(value, str):
            # A literal: wrap it in a zero-argument constant function.
            self.calc = lambda: value
            self.parameters = set()
        else:
            assert callable(value), "{} should be a string or callable. Got: {}".format(name, value)
            self.calc = value
            self.parameters = get_function_parameters(value)

    def __repr__(self):
        optional_tag = ", optional" if self.is_optional else ""
        user_tag = ", user" if self.is_user else ""
        return "<Setter {}{}{}, conditions: {}{}>".format(
            self.name,
            optional_tag,
            user_tag,
            self.conditions,
            ", parameters {}".format(self.parameters))
class Scope:
    """One switch variable plus the Target to apply for each case value."""

    def __init__(self, name: str, cases=None):
        self.name = name
        self.cases = cases if cases else dict()

    def add_case(self, value: str, target):
        assert isinstance(target, Target)
        self.cases[value] = target

    def __iadd__(self, other):
        assert isinstance(other, Scope), "Internal consistency error, expected Scope but got {}".format(type(other))
        # A merged switch may not introduce new, unhandled cases, so both
        # scopes must agree on name and case set before merging.
        assert self.name == other.name, "Internal consistency error: Trying to merge scopes with " \
            "different names: {} and {}".format(self.name, other.name)
        assert self.cases.keys() == other.cases.keys(), "Same name / switch variable introduced " \
            "with a different set of possible cases. name: {}. First options: {}, Second " \
            "options: {}".format(self.name, self.cases.keys(), other.cases.keys())
        # Key sets are identical at this point; merge case-by-case.
        for case_value in self.cases:
            self.cases[case_value] += other.cases[case_value]
        return self

    def __eq__(self, other):
        assert isinstance(other, Scope)
        return (self.name, self.cases) == (other.name, other.cases)

    def __repr__(self):
        return "<Scope cases: {}>".format(self.cases.items())
class Target:
# TODO(cmaloney): Make a better API for working with and managing sub scopes. The current
# dictionary of dictionaries is really hard to use right.
def __init__(self, variables=None, sub_scopes=None):
self.variables = variables if variables else set()
self.sub_scopes = sub_scopes if sub_scopes else dict()
def add_variable(self, variable: str):
self.variables.add(variable)
def add_scope(self, scope: Scope):
if scope.name in self.sub_scopes:
self.sub_scopes[scope.name] += scope
else:
self.sub_scopes[scope.name] = scope
def __iadd__(self, other):
assert isinstance(other, Target), "Internal consistency error, expected Target but got {}".format(type(other))
self.variables |= other.variables
# Add all scopes from the other to this, merging all common sub scopes.
for name, scope in other.sub_scopes.items():
if name in self.sub_scopes:
self.sub_scopes[name] += scope
else:
self.sub_scopes[name] = scope
return self
def __repr__(self):
return "<Target variables: {}, sub_scopes: {}>".format(self.variables, self.sub_scopes.items())
def __eq__(self, other):
assert isinstance(other, Target)
return self.variables == other.variables and self.sub_scopes == other.sub_scopes
class Source:
    """Setters and validation functions contributed by one configuration source."""

    def __init__(self, entry=None, is_user=False,):
        self.setters = dict()
        self.validate = list()
        self.is_user = is_user
        if entry:
            self.add_entry(entry, False)

    def add_setter(self, name, value, is_optional, conditions):
        """Register one more way of computing the argument *name*."""
        new_setter = Setter(name, value, is_optional, conditions, self.is_user)
        self.setters.setdefault(name, list()).append(new_setter)

    def add_conditional_scope(self, scope, conditions):
        # TODO(cmaloney): 'defaults' are the same as 'can' and 'must' is identical to 'arguments' except
        # that one takes functions and one takes strings. Simplify to just 'can', 'must'.
        assert scope.keys() <= {'validate', 'default', 'must', 'conditional'}

        self.validate += scope.get('validate', list())

        # 'must' setters are mandatory; 'default' setters are optional.
        for arg_name, arg_value in scope.get('must', dict()).items():
            self.add_setter(arg_name, arg_value, False, conditions)
        for arg_name, arg_value in scope.get('default', dict()).items():
            self.add_setter(arg_name, arg_value, True, conditions)

        # Recurse into conditionals, extending the condition chain per case.
        for switch_name, case_options in scope.get('conditional', dict()).items():
            for case_value, sub_scope in case_options.items():
                self.add_conditional_scope(sub_scope, conditions + [(switch_name, case_value)])

    add_must = partialmethod(add_setter, is_optional=False, conditions=[])

    def add_value_dict(self, value_dict):
        for arg_name, literal in value_dict.items():
            self.add_must(arg_name, literal)

    def remove_setters(self, scope):
        def drop(name):
            # Missing names are ignored; only known setters are removed.
            self.setters.pop(name, None)

        for name in scope.get('must', dict()).keys():
            drop(name)
        for name in scope.get('default', dict()).keys():
            drop(name)
        for name, case_options in scope.get('conditional', dict()).items():
            if name in self.setters:
                raise NotImplementedError("Should conditional setters overwrite all setters?")

    def add_entry(self, entry, replace_existing):
        if replace_existing:
            self.remove_setters(entry)
        self.add_conditional_scope(entry, [])
# NOTE: This exception should never escape the DFSArgumentCalculator
class CalculatorError(Exception):
    """Internal error raised while resolving arguments.

    Attributes:
        message: human-readable description of the failure.
        chain: list describing the resolution path that led to the failure.
    """

    def __init__(self, message, chain=None):
        assert isinstance(message, str)
        # Fix: the previous `chain=[]` default was a shared mutable object;
        # build a fresh list per instance instead.
        chain = [] if chain is None else chain
        assert isinstance(chain, list)
        self.message = message
        self.chain = chain
        super().__init__(message)
# Depth first search argument calculator. Detects cycles, as well as unmet
# dependencies.
# TODO(cmaloney): Separate chain / path building when unwinding from the root
# error messages.
class DFSArgumentCalculator():
    """Resolves argument values by depth-first traversal of their dependencies.

    Failures are accumulated in self._errors / self._unset instead of being
    surfaced immediately; calculate() raises one ValidationError at the end if
    anything was recorded. CalculatorError is used only for internal unwinding
    and should never escape this class.
    """

    def __init__(self, setters, validate_fns):
        # name -> list of candidate Setter objects for that argument.
        self._setters = setters
        # name -> resolved value; None marks "attempted and failed".
        self._arguments = dict()
        # Names currently being resolved (cycle detection).
        self.__in_progress = set()
        # name (or frozenset of names) -> error message for the user.
        self._errors = dict()
        # Names that have no feasible setter at all.
        self._unset = set()
        # Re-arrange the validation functions so we can more easily access them by
        # argument name.
        self._validate_by_arg = dict()
        self._multi_arg_validate = dict()
        for fn in validate_fns:
            parameters = get_function_parameters(fn)
            # Could build up the single and multi parameter validation function maps in the same
            # thing but the timing / handling of when and how we run single vs. multi-parameter
            # validation functions is fairly different, the extra bit here simplifies the later code.
            if len(parameters) == 1:
                self._validate_by_arg[parameters.pop()] = fn
                assert not parameters
            else:
                self._multi_arg_validate[frozenset(parameters)] = fn

    def _calculate_argument(self, name):
        """Pick the single feasible setter for `name`, run it, validate the result."""
        # Filter out any setters which have predicates / conditions which are
        # satisfiably false.
        def all_conditions_met(setter):
            for condition_name, condition_value in setter.conditions:
                try:
                    if self._get(condition_name) != condition_value:
                        return False
                except CalculatorError as ex:
                    raise CalculatorError(
                        ex.message,
                        ex.chain + ['trying to test condition {}={}'.format(condition_name, condition_value)]) from ex
            return True

        # Find the right setter to calculate the argument.
        feasible = list(filter(all_conditions_met, self._setters.get(name, list())))
        if len(feasible) == 0:
            self._unset.add(name)
            raise CalculatorError("no way to set")

        # Filter out all optional setters if there is more than one way to set.
        if len(feasible) > 1:
            final_feasible = list(filter(lambda setter: not setter.is_optional, feasible))
            assert final_feasible, "Had multiple optionals and no musts. Template internal error: {!r}".format(feasible)
            feasible = final_feasible

        # Must be calculated but user tried to provide.
        if len(feasible) == 2 and (feasible[0].is_user or feasible[1].is_user):
            self._errors[name] = ("{} must be calculated, but was explicitly set in the "
                                  "configuration. Remove it from the configuration.").format(name)
            raise CalculatorError("{} must be calculated but set twice".format(name))

        if len(feasible) > 1:
            self._errors[name] = "Internal error: Multiple ways to set {}.".format(name)
            raise CalculatorError("multiple ways to set",
                                  ["options: {}".format(feasible)])
        setter = feasible[0]

        # Get values for the parameters, then call the setter function.
        kwargs = {}
        for parameter in setter.parameters:
            kwargs[parameter] = self._get(parameter)
        try:
            value = setter.calc(**kwargs)
        except AssertionError as ex:
            self._errors[name] = ex.args[0]
            raise CalculatorError("assertion while calc")
        # Run the single-argument validator for this name, if any.
        if name in self._validate_by_arg:
            try:
                self._validate_by_arg[name](value)
            except AssertionError as ex:
                self._errors[name] = ex.args[0]
                raise CalculatorError("assertion while validate")
        return value

    def _get(self, name):
        """Return the value for `name`, computing and caching it on first use."""
        if name in self._arguments:
            if self._arguments[name] is None:
                # A previous attempt already failed; do not retry.
                raise CalculatorError("No way to set", [name])
            return self._arguments[name]

        # Detect cycles by checking if we're in the middle of calculating the
        # argument being asked for
        if name in self.__in_progress:
            raise CalculatorError("Internal error. cycle detected. re-encountered {}".format(name))
        self.__in_progress.add(name)
        try:
            self._arguments[name] = self._calculate_argument(name)
            return self._arguments[name]
        except CalculatorError as ex:
            self._arguments[name] = None
            raise CalculatorError(ex.message, ex.chain + ['while calculating {}'.format(name)]) from ex
        except:
            # Bare except is deliberate: mark the argument failed, then re-raise
            # whatever came up unchanged.
            self._arguments[name] = None
            raise
        finally:
            self.__in_progress.remove(name)

    def _calculate_target(self, target):
        """Evaluate all variables and sub-scopes of `target`, then run the
        multi-argument validations over whatever resolved successfully."""
        def evaluate_var(name):
            try:
                self._get(name)
            except CalculatorError as ex:
                # Details were already recorded in self._errors / self._unset.
                log.debug("Error calculating %s: %s. Chain: %s", name, ex.message, ex.chain)

        for name in target.variables:
            evaluate_var(name)

        for name, sub_scope in target.sub_scopes.items():
            if name not in self._arguments:
                evaluate_var(name)
            # If the internal arg is None, there was an error, don't check if it
            # is a legal choice.
            if self._arguments[name] is None:
                continue
            choice = self._get(name)
            if choice not in sub_scope.cases:
                # NOTE(review): every other access goes through sub_scope.cases,
                # so sub_scope.keys() below likely raises AttributeError when this
                # message is built -- confirm and probably use sub_scope.cases.keys().
                self._errors[name] = "Invalid choice {}. Must choose one of {}".format(
                    choice, ", ".join(sorted(sub_scope.keys())))
                continue
            self._calculate_target(sub_scope.cases[choice])

        # Perform all multi-argument validations
        for parameter_set, validate_fn in self._multi_arg_validate.items():
            # Build up argument map for validate function. If any arguments are
            # unset then skip this validate function.
            kwargs = dict()
            skip = False
            for parameter in parameter_set:
                if (parameter not in self._arguments) or (self._arguments[parameter] is None):
                    skip = True
                    break
                kwargs[parameter] = self._arguments[parameter]
            if skip:
                continue

            # Call the validation function, catching AssertionErrors and turning them into errors in
            # the error dictionary.
            try:
                validate_fn(**kwargs)
            except AssertionError as ex:
                self._errors[parameter_set] = ex.args[0]

        # TODO(cmaloney): Return per-target results with the path to calculate each argument and the
        # full set of arguments touched included.
        return self._arguments

    # Force calculation of all arguments by accessing the arguments in this
    # scope and recursively all sub-scopes.
    def calculate(self, targets):
        """Resolve every target; raise ValidationError if any errors or unset names."""
        for target in targets:
            self._calculate_target(target)
        if len(self._errors) or len(self._unset):
            raise ValidationError(self._errors, self._unset)
        return self._arguments
return self._arguments
def resolve_configuration(sources: list, targets: list, user_arguments: dict):
    """Resolve every argument needed by `targets` from `sources` + user input.

    Returns the dict of all calculated argument name -> string value; raises
    ValidationError (via DFSArgumentCalculator.calculate) on any failure.
    """
    # Make sure all user provided arguments are strings.
    # TODO(cmaloney): Loosen this restriction / allow arbitrary types as long
    # as they all have a gen specific string form.
    validate_arguments_strings(user_arguments)

    # Validate that targets is a list of Targets
    for target in targets:
        assert isinstance(target, Target), \
            "target should be a Target found a {} with value: {}".format(type(target), target)

    # TODO(cmaloney): Re-enable this after sorting out how to have "optional" config targets which
    # add in extra "acceptable" parameters (SSH Config, AWS Advanced Template config, etc)
    # validate_all_arguments_match_parameters(mandatory_parameters, setters, user_arguments)

    # Add in all user arguments as setters.
    # Happens last so that they are never overwritten with replace_existing=True
    user_config = Source(is_user=True)
    user_config.add_value_dict(user_arguments)

    # Merge all the setters and validate functions into one uber list.
    # (A redundant pre-merge of the sources that used to happen before the
    # user config was built has been removed: its results were immediately
    # discarded by the reassignments below.)
    setters = copy.deepcopy(user_config.setters)
    validate = copy.deepcopy(user_config.validate)
    for source in sources:
        for name, setter_list in source.setters.items():
            # TODO(cmaloney): Make a setter manager already...
            setters.setdefault(name, list())
            setters[name] += setter_list
        validate += source.validate

    # Use setters to calculate every required parameter.
    arguments = DFSArgumentCalculator(setters, validate).calculate(targets)

    # Validate all new / calculated arguments are strings.
    validate_arguments_strings(arguments)

    log.info("Final arguments:" + json_prettyprint(arguments))

    # TODO(cmaloney) Give each config target the values for all it's parameters that were hit as
    # well as any parameters that led to those parameters.
    return arguments
def validate_configuration(sources: list, targets: list, user_arguments: dict):
    """Run resolve_configuration and report the outcome as a status dict.

    Returns {'status': 'ok'} on success, otherwise {'status': 'errors',
    'errors': {name: {'message': ...}}, 'unset': ...}.
    """
    try:
        resolve_configuration(sources, targets, user_arguments)
        return {'status': 'ok'}
    except ValidationError as ex:
        # Multi-key (frozenset) errors are deferred and normalized to
        # single-key entries; single-key messages always take precedence and
        # are never overwritten.
        # TODO(cmaloney): Teach the whole stack to be able to handle multi-key
        # validation errors.
        messages = {}
        deferred = {}
        for key, msg in ex.errors.items():
            if isinstance(key, frozenset):
                deferred[key] = msg
            else:
                assert isinstance(key, str)
                messages[key] = {'message': msg}
        for key_group, msg in deferred.items():
            for name in key_group:
                # setdefault skips names that already carry a single-key message.
                messages.setdefault(name, {'message': msg})
        return {
            'status': 'errors',
            'errors': messages,
            'unset': ex.unset
        }
|
{
"content_hash": "006568810e37e75d033394813f0e8d4e",
"timestamp": "",
"source": "github",
"line_count": 466,
"max_line_length": 120,
"avg_line_length": 38.9206008583691,
"alnum_prop": 0.6088107184209075,
"repo_name": "xinxian0458/dcos",
"id": "866cce04d40e06db900a0d00e7363c9cbf856eb3",
"size": "18137",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "gen/internals.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "2529"
},
{
"name": "HTML",
"bytes": "1883"
},
{
"name": "Python",
"bytes": "799514"
},
{
"name": "Shell",
"bytes": "58505"
}
],
"symlink_target": ""
}
|
import optparse
import os
import shlex
import sys
import textwrap
from . import version
# Default maximum number of concurrent SSH connections.
_DEFAULT_PARALLELISM = 32
_DEFAULT_TIMEOUT = 0 # "infinity" by default
def common_parser():
    """
    Create a basic OptionParser with arguments common to all pssh programs.

    Returns an optparse.OptionParser; callers add their program-specific
    options on top. Note -h is repurposed for the hosts file (help remains
    reachable via --help thanks to the "resolve" conflict handler).
    """
    # The "resolve" conflict handler avoids errors from the hosts option
    # conflicting with the help option.
    parser = optparse.OptionParser(conflict_handler='resolve',
                                   version=version.VERSION)
    # Ensure that options appearing after the command are sent to ssh.
    parser.disable_interspersed_args()
    parser.epilog = "Example: pssh -h nodes.txt -l irb2 -o /tmp/foo uptime"

    parser.add_option('-h', '--hosts', dest='host_files', action='append',
                      metavar='HOST_FILE',
                      help='hosts file (each line "[user@]host[:port]")')
    parser.add_option('-g', '--groups', dest='host_groups', action='append',
                      metavar='HOSTGROUPS_FILE',
                      help='host groups (as defined in /etc/pssh/hostgroups file)')
    parser.add_option('-H', '--host', dest='host_strings', action='append',
                      metavar='HOST_STRING',
                      help='additional host entries ("[user@]host[:port]")')
    parser.add_option('-l', '--user', dest='user',
                      help='username (OPTIONAL)')
    parser.add_option('-p', '--par', dest='par', type='int',
                      help='max number of parallel threads (OPTIONAL)')
    parser.add_option('-o', '--outdir', dest='outdir',
                      help='output directory for stdout files (OPTIONAL)')
    parser.add_option('-e', '--errdir', dest='errdir',
                      help='output directory for stderr files (OPTIONAL)')
    parser.add_option('-t', '--timeout', dest='timeout', type='int',
                      help='timeout (secs) (0 = no timeout) per host (OPTIONAL)')
    parser.add_option('-O', '--option', dest='options', action='append',
                      metavar='OPTION', help='SSH option (OPTIONAL)')
    parser.add_option('-v', '--verbose', dest='verbose', action='store_true',
                      help='turn on warning and diagnostic messages (OPTIONAL)')
    parser.add_option('-A', '--askpass', dest='askpass', action='store_true',
                      help='Ask for a password (OPTIONAL)')
    # -x splits its value with shlex (quotes/backslashes honored); -X appends
    # a single argument verbatim. Both accumulate into the same dest.
    parser.add_option('-x', '--extra-args', action='callback', type='string',
                      metavar='ARGS', callback=shlex_append, dest='extra',
                      help='Extra command-line arguments, with processing for '
                      'spaces, quotes, and backslashes')
    parser.add_option('-X', '--extra-arg', dest='extra', action='append',
                      metavar='ARG', help='Extra command-line argument')

    return parser
def common_defaults(**kwargs):
    """Build the defaults dict for option parsing.

    Starts from built-in defaults, applies caller overrides, then lets
    PSSH_* environment variables override those. A deprecated PSSH_HOSTS
    variable is still honored with a warning on stderr.
    """
    defaults = dict(par=_DEFAULT_PARALLELISM, timeout=_DEFAULT_TIMEOUT)
    defaults.update(**kwargs)

    # Option name -> environment variable that may override it.
    env_overrides = {
        'user': 'PSSH_USER',
        'par': 'PSSH_PAR',
        'outdir': 'PSSH_OUTDIR',
        'errdir': 'PSSH_ERRDIR',
        'timeout': 'PSSH_TIMEOUT',
        'verbose': 'PSSH_VERBOSE',
        'print_out': 'PSSH_PRINT',
        'askpass': 'PSSH_ASKPASS',
        'inline': 'PSSH_INLINE',
        'recursive': 'PSSH_RECURSIVE',
        'archive': 'PSSH_ARCHIVE',
        'compress': 'PSSH_COMPRESS',
        'localdir': 'PSSH_LOCALDIR',
    }
    for option, var in env_overrides.items():
        env_value = os.getenv(var)
        if env_value:
            defaults[option] = env_value

    env_value = os.getenv('PSSH_OPTIONS')
    if env_value:
        defaults['options'] = [env_value]

    env_value = os.getenv('PSSH_HOSTS')
    if env_value:
        message1 = ('Warning: the PSSH_HOSTS environment variable is '
                    'deprecated. Please use the "-h" option instead, and consider '
                    'creating aliases for convenience. For example:')
        message2 = " alias pssh_abc='pssh -h /path/to/hosts_abc'"
        sys.stderr.write(textwrap.fill(message1))
        sys.stderr.write('\n')
        sys.stderr.write(message2)
        sys.stderr.write('\n')
        defaults['host_files'] = [env_value]

    return defaults
def shlex_append(option, opt_str, value, parser):
    """An optparse callback similar to the append action.

    The given value is processed with shlex, and the resulting list is
    concatenated to the option's dest list.
    """
    words = shlex.split(value)
    dest_list = getattr(parser.values, option.dest)
    if dest_list is None:
        # First use: create the list and store it on the option's dest.
        dest_list = []
        setattr(parser.values, option.dest, dest_list)
    dest_list.extend(words)
|
{
"content_hash": "5836e12d28f69424d04497e295077686",
"timestamp": "",
"source": "github",
"line_count": 111,
"max_line_length": 77,
"avg_line_length": 40.234234234234236,
"alnum_prop": 0.5989699955217197,
"repo_name": "rlewczuk/pssh",
"id": "2eb14ac05bf75ec24a24066433d6b0e54ab40fcc",
"size": "4549",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "psshlib/cli.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "74469"
}
],
"symlink_target": ""
}
|
from urllib.error import URLError
from urllib.request import urlopen
import re
import pymysql
import ssl
from pymysql import Error
# Decode fetched page bytes, trying each candidate charset in turn
# (not every site serves utf-8).
def decode_page(page_bytes, charsets=('utf-8',)):
    """Return page_bytes decoded with the first charset that works, else None."""
    for charset in charsets:
        try:
            return page_bytes.decode(charset)
        except UnicodeDecodeError:
            # Try the next candidate charset.
            pass
    return None
# Fetch a URL's HTML, retrying a fixed number of times on network errors.
def get_page_html(seed_url, *, retry_times=3, charsets=('utf-8',)):
    """Return the decoded HTML for seed_url, or None after all attempts fail.

    retry_times counts retries beyond the first attempt, so the URL is
    fetched at most retry_times + 1 times (same as the original recursion).
    """
    for _attempt in range(retry_times + 1):
        try:
            return decode_page(urlopen(seed_url).read(), charsets)
        except URLError:
            continue
    return None
# Extract the interesting pieces (usually links) from a page with a regex.
def get_matched_parts(page_html, pattern_str, pattern_ignore_case=re.I):
    """Return all matches of pattern_str in page_html; [] for falsy input.

    pattern_ignore_case carries the regex flags (case-insensitive by default).
    """
    if not page_html:
        return []
    return re.findall(pattern_str, page_html, pattern_ignore_case)
# Run the crawler from seed_url and persist matched page headings to MySQL.
def start_crawl(seed_url, match_pattern, *, max_depth=-1):
    """Crawl from seed_url, extract <h1> headings from linked pages, and
    insert (heading, link) rows into the tb_result table.

    NOTE(review): database credentials are hardcoded below -- fine only for
    a local demo.
    """
    conn = pymysql.connect(host='localhost', port=3306,
                           database='crawler', user='root',
                           password='123456', charset='utf8')
    try:
        with conn.cursor() as cursor:
            url_list = [seed_url]
            # url -> depth at which it was discovered.
            visited_url_list = {seed_url: 0}
            while url_list:
                current_url = url_list.pop(0)
                depth = visited_url_list[current_url]
                if depth != max_depth:
                    page_html = get_page_html(current_url, charsets=('utf-8', 'gbk', 'gb2312'))
                    links_list = get_matched_parts(page_html, match_pattern)
                    param_list = []
                    for link in links_list:
                        if link not in visited_url_list:
                            visited_url_list[link] = depth + 1
                            # Fetch each newly discovered link once to pull its heading.
                            page_html = get_page_html(link, charsets=('utf-8', 'gbk', 'gb2312'))
                            headings = get_matched_parts(page_html, r'<h1>(.*)<span')
                            if headings:
                                param_list.append((headings[0], link))
                            # NOTE(review): discovered links are never appended to
                            # url_list, so the while loop only ever processes the
                            # seed page and max_depth > 1 has no effect -- likely
                            # a missing url_list.append(link).
                    cursor.executemany('insert into tb_result values (default, %s, %s)',
                                       param_list)
                    conn.commit()
    except Error:
        # NOTE(review): database errors are silently swallowed here; at
        # minimum they should be logged.
        pass
    finally:
        conn.close()
def main():
    """Entry point: crawl the Sohu NBA page two levels deep."""
    # Disable HTTPS certificate verification process-wide so urlopen can
    # fetch sites with bad certificates.
    # NOTE(review): this weakens TLS for the whole process -- acceptable only
    # for a throwaway demo crawler.
    ssl._create_default_https_context = ssl._create_unverified_context
    start_crawl('http://sports.sohu.com/nba_a.shtml',
                r'<a[^>]+test=a\s[^>]*href=["\'](.*?)["\']',
                max_depth=2)


if __name__ == '__main__':
    main()
|
{
"content_hash": "b0fd450d5c06e1c575863ecac2852704",
"timestamp": "",
"source": "github",
"line_count": 84,
"max_line_length": 96,
"avg_line_length": 34.75,
"alnum_prop": 0.539568345323741,
"repo_name": "tzpBingo/github-trending",
"id": "981213895e966a8e16760b7ba7de6ad109d5868e",
"size": "3125",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "codespace/python/tmp/example01.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Go",
"bytes": "11470"
},
{
"name": "HTML",
"bytes": "1543"
},
{
"name": "Python",
"bytes": "49985109"
},
{
"name": "Shell",
"bytes": "18039"
}
],
"symlink_target": ""
}
|
from io import StringIO
import numpy as np
import pytest
import pandas as pd
from .base import BaseExtensionTests
class BaseParsingTests(BaseExtensionTests):
    """CSV round-trip tests for extension arrays."""

    @pytest.mark.parametrize('engine', ['c', 'python'])
    def test_EA_types(self, engine, data):
        # Round-trip an extension-dtype column through to_csv/read_csv with
        # both parser engines and check the frame survives unchanged.
        dtype_name = str(data.dtype)
        expected = pd.DataFrame({
            'with_dtype': pd.Series(data, dtype=dtype_name)
        })
        csv_text = expected.to_csv(index=False, na_rep=np.nan)
        result = pd.read_csv(
            StringIO(csv_text),
            dtype={'with_dtype': dtype_name},
            engine=engine,
        )
        self.assert_frame_equal(result, expected)
|
{
"content_hash": "77f9a572a626bf6afec75ad3c615fabf",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 64,
"avg_line_length": 27.17391304347826,
"alnum_prop": 0.6352,
"repo_name": "cbertinato/pandas",
"id": "22787c38b66fbc99b89efca33faac0b7604974cc",
"size": "625",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pandas/tests/extension/base/io.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "541"
},
{
"name": "C",
"bytes": "394466"
},
{
"name": "C++",
"bytes": "17248"
},
{
"name": "HTML",
"bytes": "606963"
},
{
"name": "Makefile",
"bytes": "529"
},
{
"name": "Python",
"bytes": "15010333"
},
{
"name": "Shell",
"bytes": "27209"
},
{
"name": "Smarty",
"bytes": "2040"
}
],
"symlink_target": ""
}
|
"""Tests for the JSON lines output module."""
import json
import os
import sys
import unittest
from plaso.formatters import manager as formatters_manager
from plaso.lib import timelib
from plaso.output import json_line
from tests.cli import test_lib as cli_test_lib
from tests.output import test_lib
class JSONLinesOutputTest(test_lib.OutputModuleTestCase):
  """Tests for the JSON lines output module."""

  def setUp(self):
    """Sets up the objects needed for this test."""
    output_mediator = self._CreateOutputMediator()
    self._output_writer = cli_test_lib.TestOutputWriter()
    self._output_module = json_line.JSONLineOutputModule(output_mediator)
    self._output_module.SetOutputWriter(self._output_writer)
    self._event_object = test_lib.TestEventObject()

  def testWriteHeader(self):
    """Tests the WriteHeader function."""
    self._output_module.WriteHeader()

    header = self._output_writer.ReadOutput()
    # JSON lines output has no header; assertEquals was a deprecated alias
    # (removed in Python 3.12) and is replaced with assertEqual.
    self.assertEqual(header, u'')

  def testWriteFooter(self):
    """Tests the WriteFooter function."""
    self._output_module.WriteFooter()

    footer = self._output_writer.ReadOutput()
    # JSON lines output has no footer either.
    self.assertEqual(footer, u'')

  def testWriteEventBody(self):
    """Tests the WriteEventBody function."""
    formatters_manager.FormattersManager.RegisterFormatter(
        test_lib.TestEventFormatter)
    self._output_module.WriteEventBody(self._event_object)
    formatters_manager.FormattersManager.DeregisterFormatter(
        test_lib.TestEventFormatter)

    # The dict comparison is very picky on Windows hence we
    # have to make sure the UUID is a Unicode string.
    expected_uuid = u'{0:s}'.format(self._event_object.uuid)
    expected_timestamp = timelib.Timestamp.CopyFromString(
        u'2012-06-27 18:17:01')

    if sys.platform.startswith(u'win'):
      # The dict comparison is very picky on Windows hence we
      # have to make sure the drive letter is in the same case.
      expected_os_location = os.path.abspath(u'\\{0:s}'.format(
          os.path.join(u'cases', u'image.dd')))
    else:
      expected_os_location = u'{0:s}{1:s}'.format(
          os.path.sep, os.path.join(u'cases', u'image.dd'))

    expected_json_dict = {
        u'__type__': u'EventObject',
        u'data_type': u'test:output',
        u'display_name': u'OS: /var/log/syslog.1',
        u'hostname': u'ubuntu',
        u'inode': 12345678,
        u'message': (
            u'Reporter <CRON> PID: |8442| (pam_unix(cron:session): '
            u'session closed for user root)'),
        u'pathspec': {
            u'__type__': u'PathSpec',
            u'type_indicator': u'TSK',
            u'location': u'/var/log/syslog.1',
            u'inode': 15,
            u'parent': {
                u'__type__': u'PathSpec',
                u'type_indicator': u'OS',
                u'location': expected_os_location,
            }
        },
        u'text': (
            u'Reporter <CRON> PID: |8442| (pam_unix(cron:session): '
            u'session\n closed for user root)'),
        u'timestamp': expected_timestamp,
        u'username': u'root',
        u'uuid': expected_uuid
    }
    event_body = self._output_writer.ReadOutput()

    # We need to compare dicts since we cannot determine the order
    # of values in the string.
    json_dict = json.loads(event_body)
    self.assertEqual(json_dict, expected_json_dict)
if __name__ == '__main__':
  # Allow running this test module directly.
  unittest.main()
|
{
"content_hash": "4a00b2e4b7873168207e552c9d0d9c69",
"timestamp": "",
"source": "github",
"line_count": 98,
"max_line_length": 73,
"avg_line_length": 34.734693877551024,
"alnum_prop": 0.6383666274970623,
"repo_name": "ostree/plaso",
"id": "d5e3f43b4942f9b36a21f925a7672d57b02f59cc",
"size": "3446",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tests/output/json_line.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1276"
},
{
"name": "Makefile",
"bytes": "1151"
},
{
"name": "Protocol Buffer",
"bytes": "13930"
},
{
"name": "Python",
"bytes": "3133020"
},
{
"name": "Shell",
"bytes": "47305"
}
],
"symlink_target": ""
}
|
from decimal import Decimal
import pytest
from stellar_sdk import (
AiohttpClient,
Asset,
MuxedAccount,
Network,
ServerAsync,
TransactionEnvelope,
)
from stellar_sdk.account import Thresholds
from stellar_sdk.call_builder.call_builder_async import *
@pytest.mark.slow
@pytest.mark.asyncio
class TestServerAsync:
    """Live integration tests for ServerAsync against the public Horizon API.

    Every test opens a real HTTPS connection to https://horizon.stellar.org,
    hence the `slow` marker; the tests require network access.
    """

    async def test_load_acount(self):
        """Loading an account by its plain G... address."""
        account_id = "GDV6FVHPY4JH7EEBSJYPQQYZA3OC6TKTM2TAXRHWT4EEL7BJ2BTDQT5D"
        horizon_url = "https://horizon.stellar.org"
        client = AiohttpClient()
        async with ServerAsync(horizon_url, client) as server:
            account = await server.load_account(account_id)
            assert account.account == MuxedAccount.from_account(account_id)
            assert isinstance(account.sequence, int)
            # Thresholds for this well-known account are fixed on-network.
            assert account.thresholds == Thresholds(1, 2, 3)

    async def test_load_acount_muxed_account_str(self):
        """Loading an account via a muxed M... address string."""
        account_id = (
            "MDV6FVHPY4JH7EEBSJYPQQYZA3OC6TKTM2TAXRHWT4EEL7BJ2BTDQAAAAAAAAAAE2KS7Y"
        )
        horizon_url = "https://horizon.stellar.org"
        client = AiohttpClient()
        async with ServerAsync(horizon_url, client) as server:
            account = await server.load_account(account_id)
            assert account.account == MuxedAccount.from_account(account_id)
            assert isinstance(account.sequence, int)
            assert account.thresholds == Thresholds(1, 2, 3)

    async def test_load_acount_muxed_account(self):
        """Loading an account via a MuxedAccount instance."""
        account_id = MuxedAccount(
            "GDV6FVHPY4JH7EEBSJYPQQYZA3OC6TKTM2TAXRHWT4EEL7BJ2BTDQT5D", 1234
        )
        horizon_url = "https://horizon.stellar.org"
        client = AiohttpClient()
        async with ServerAsync(horizon_url, client) as server:
            account = await server.load_account(account_id)
            assert account.account == account_id
            assert isinstance(account.sequence, int)
            assert account.thresholds == Thresholds(1, 2, 3)

    async def test_fetch_base_fee(self):
        """fetch_base_fee returns an integer stroop amount."""
        horizon_url = "https://horizon.stellar.org"
        client = AiohttpClient()
        async with ServerAsync(horizon_url, client) as server:
            base_fee = await server.fetch_base_fee()
            assert isinstance(base_fee, int)

    async def test_endpoint(self):
        """Each server helper returns the matching call builder, configured
        with the same horizon_url/client plus the passed parameters."""
        horizon_url = "https://horizon.stellar.org"
        client = AiohttpClient()
        async with ServerAsync(horizon_url, client) as server:
            assert server.accounts() == AccountsCallBuilder(horizon_url, client)
            assert server.assets() == AssetsCallBuilder(horizon_url, client)
            assert server.claimable_balances() == ClaimableBalancesCallBuilder(
                horizon_url, client
            )
            assert server.data(
                "GDV6FVHPY4JH7EEBSJYPQQYZA3OC6TKTM2TAXRHWT4EEL7BJ2BTDQT5D", "hello"
            ) == DataCallBuilder(
                horizon_url,
                client,
                "GDV6FVHPY4JH7EEBSJYPQQYZA3OC6TKTM2TAXRHWT4EEL7BJ2BTDQT5D",
                "hello",
            )
            assert server.effects() == EffectsCallBuilder(horizon_url, client)
            assert server.fee_stats() == FeeStatsCallBuilder(horizon_url, client)
            assert server.ledgers() == LedgersCallBuilder(horizon_url, client)
            assert server.liquidity_pools() == LiquidityPoolsBuilder(
                horizon_url, client
            )
            assert server.offers() == OffersCallBuilder(horizon_url, client)
            assert server.operations() == OperationsCallBuilder(horizon_url, client)
            buying = Asset.native()
            selling = Asset(
                "MOE", "GDV6FVHPY4JH7EEBSJYPQQYZA3OC6TKTM2TAXRHWT4EEL7BJ2BTDQT5D"
            )
            assert server.orderbook(buying, selling) == OrderbookCallBuilder(
                horizon_url, client, buying, selling
            )
            source = "GAYSHLG75RPSMXWJ5KX7O7STE6RSZTD6NE4CTWAXFZYYVYIFRUVJIBJH"
            destination_asset = Asset(
                "EUR", "GDSBCQO34HWPGUGQSP3QBFEXVTSR2PW46UIGTHVWGWJGQKH3AFNHXHXN"
            )
            # Amounts are accepted both as str and as Decimal.
            destination_amount = "20.0"
            assert server.strict_receive_paths(
                source, destination_asset, destination_amount
            ) == StrictReceivePathsCallBuilder(
                horizon_url, client, source, destination_asset, destination_amount
            )
            destination_amount = Decimal("20.0")
            assert server.strict_receive_paths(
                source, destination_asset, destination_amount
            ) == StrictReceivePathsCallBuilder(
                horizon_url, client, source, destination_asset, destination_amount
            )
            source_asset = Asset(
                "EUR", "GDSBCQO34HWPGUGQSP3QBFEXVTSR2PW46UIGTHVWGWJGQKH3AFNHXHXN"
            )
            source_amount = "10.25"
            destination = "GARSFJNXJIHO6ULUBK3DBYKVSIZE7SC72S5DYBCHU7DKL22UXKVD7MXP"
            assert server.strict_send_paths(
                source_asset, source_amount, destination
            ) == StrictSendPathsCallBuilder(
                horizon_url, client, source_asset, source_amount, destination
            )
            source_amount = Decimal("10.25")
            assert server.strict_send_paths(
                source_asset, source_amount, destination
            ) == StrictSendPathsCallBuilder(
                horizon_url, client, source_asset, source_amount, destination
            )
            assert server.payments() == PaymentsCallBuilder(horizon_url, client)
            assert server.root() == RootCallBuilder(horizon_url, client)
            base = Asset.native()
            counter = Asset(
                "MOE", "GDV6FVHPY4JH7EEBSJYPQQYZA3OC6TKTM2TAXRHWT4EEL7BJ2BTDQT5D"
            )
            resolution = 3600000
            start_time = 1565272000000
            end_time = 1565278000000
            offset = 3600000
            assert server.trade_aggregations(
                base, counter, resolution, start_time, end_time, offset
            ) == TradeAggregationsCallBuilder(
                horizon_url,
                client,
                base,
                counter,
                resolution,
                start_time,
                end_time,
                offset,
            )
            assert server.trades() == TradesCallBuilder(horizon_url, client)
            assert server.transactions() == TransactionsCallBuilder(horizon_url, client)

    async def test_submit_transaction_with_xdr(self):
        """Submitting a raw base64 XDR string (skip_memo_required_check=True)."""
        xdr = "AAAAAHI7fpgo+b7tgpiFyYWimjV7L7IOYLwmQS7k7F8SronXAAAAZAE+QT4AAAAJAAAAAQAAAAAAAAAAAAAAAF1MG8cAAAAAAAAAAQAAAAAAAAAAAAAAAOvi1O/HEn+QgZJw+EMZBtwvTVNmpgvE9p8IRfwp0GY4AAAAAAExLQAAAAAAAAAAARKuidcAAABAJVc1ASGp35hUquGNbzzSqWPoTG0zgc89zc4p+19QkgbPqsdyEfHs7+ng9VJA49YneEXRa6Fv7pfKpEigb3VTCg=="
        horizon_url = "https://horizon.stellar.org"
        client = AiohttpClient()
        async with ServerAsync(horizon_url, client) as server:
            resp = await server.submit_transaction(xdr, True)
            assert resp["envelope_xdr"] == xdr

    async def test_submit_transaction_with_te(self):
        """Submitting a parsed TransactionEnvelope round-trips to the same XDR."""
        xdr = "AAAAAHI7fpgo+b7tgpiFyYWimjV7L7IOYLwmQS7k7F8SronXAAAAZAE+QT4AAAAJAAAAAQAAAAAAAAAAAAAAAF1MG8cAAAAAAAAAAQAAAAAAAAAAAAAAAOvi1O/HEn+QgZJw+EMZBtwvTVNmpgvE9p8IRfwp0GY4AAAAAAExLQAAAAAAAAAAARKuidcAAABAJVc1ASGp35hUquGNbzzSqWPoTG0zgc89zc4p+19QkgbPqsdyEfHs7+ng9VJA49YneEXRa6Fv7pfKpEigb3VTCg=="
        te = TransactionEnvelope.from_xdr(xdr, Network.PUBLIC_NETWORK_PASSPHRASE)
        horizon_url = "https://horizon.stellar.org"
        client = AiohttpClient()
        async with ServerAsync(horizon_url, client) as server:
            resp = await server.submit_transaction(te, True)
            assert resp["envelope_xdr"] == xdr
|
{
"content_hash": "704259e2afa77b6c06bdfcf563149f46",
"timestamp": "",
"source": "github",
"line_count": 166,
"max_line_length": 296,
"avg_line_length": 46.80120481927711,
"alnum_prop": 0.6392071051615394,
"repo_name": "StellarCN/py-stellar-base",
"id": "191efc6e5ba911fd3c4050fc0d92f156b2245e6f",
"size": "7769",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "tests/test_server_async.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1737"
},
{
"name": "Makefile",
"bytes": "1085"
},
{
"name": "Python",
"bytes": "2044193"
},
{
"name": "RPC",
"bytes": "76503"
}
],
"symlink_target": ""
}
|
"""This module contains Google Dataproc Metastore operators."""
from datetime import datetime
from time import sleep
from typing import TYPE_CHECKING, Dict, List, Optional, Sequence, Tuple, Union
from google.api_core.gapic_v1.method import DEFAULT, _MethodDefault
from google.api_core.retry import Retry, exponential_sleep_generator
from google.cloud.metastore_v1 import MetadataExport, MetadataManagementActivity
from google.cloud.metastore_v1.types import Backup, MetadataImport, Service
from google.cloud.metastore_v1.types.metastore import DatabaseDumpSpec, Restore
from google.protobuf.field_mask_pb2 import FieldMask
from googleapiclient.errors import HttpError
from airflow import AirflowException
from airflow.models import BaseOperator, BaseOperatorLink
from airflow.models.xcom import XCom
from airflow.providers.google.cloud.hooks.dataproc_metastore import DataprocMetastoreHook
from airflow.providers.google.common.links.storage import StorageLink
if TYPE_CHECKING:
from airflow.models.taskinstance import TaskInstanceKey
from airflow.utils.context import Context
# URL templates for the Google Cloud console pages of Dataproc Metastore
# resources; filled in with region / service_id / project_id (and, for the
# detailed links, a resource name) by the operator link classes below.
BASE_LINK = "https://console.cloud.google.com"
METASTORE_BASE_LINK = BASE_LINK + "/dataproc/metastore/services/{region}/{service_id}"
METASTORE_BACKUP_LINK = METASTORE_BASE_LINK + "/backups/{resource}?project={project_id}"
METASTORE_BACKUPS_LINK = METASTORE_BASE_LINK + "/backuprestore?project={project_id}"
METASTORE_EXPORT_LINK = METASTORE_BASE_LINK + "/importexport?project={project_id}"
METASTORE_IMPORT_LINK = METASTORE_BASE_LINK + "/imports/{resource}?project={project_id}"
METASTORE_SERVICE_LINK = METASTORE_BASE_LINK + "/config?project={project_id}"
class DataprocMetastoreLink(BaseOperatorLink):
    """Helper class for constructing Dataproc Metastore resource link"""

    name = "Dataproc Metastore"
    # XCom key under which persist() stores the link parameters.
    key = "conf"

    @staticmethod
    def persist(
        context: "Context",
        task_instance: Union[
            "DataprocMetastoreCreateServiceOperator",
            "DataprocMetastoreGetServiceOperator",
            "DataprocMetastoreRestoreServiceOperator",
            "DataprocMetastoreUpdateServiceOperator",
            "DataprocMetastoreListBackupsOperator",
            "DataprocMetastoreExportMetadataOperator",
        ],
        url: str,
    ):
        """Push the URL template plus region/service/project identifiers to
        XCom so get_link() can rebuild the console URL later."""
        task_instance.xcom_push(
            context=context,
            key=DataprocMetastoreLink.key,
            value={
                "region": task_instance.region,
                "service_id": task_instance.service_id,
                "project_id": task_instance.project_id,
                "url": url,
            },
        )

    def get_link(
        self,
        operator,
        dttm: Optional[datetime] = None,
        ti_key: Optional["TaskInstanceKey"] = None,
    ) -> str:
        """Return the formatted console URL, or "" when nothing was persisted."""
        if ti_key is not None:
            conf = XCom.get_value(key=self.key, ti_key=ti_key)
        else:
            # NOTE(review): assert used as control flow -- stripped under
            # `python -O`; an explicit None check may be safer. Confirm.
            assert dttm
            conf = XCom.get_one(
                dag_id=operator.dag.dag_id,
                task_id=operator.task_id,
                execution_date=dttm,
                key=self.key,
            )
        return (
            conf["url"].format(
                region=conf["region"],
                service_id=conf["service_id"],
                project_id=conf["project_id"],
            )
            if conf
            else ""
        )
class DataprocMetastoreDetailedLink(BaseOperatorLink):
    """Helper class for constructing Dataproc Metastore detailed resource link"""

    name = "Dataproc Metastore resource"
    # XCom key under which persist() stores the link parameters; deliberately
    # distinct from DataprocMetastoreLink.key so both links can coexist.
    key = "config"

    @staticmethod
    def persist(
        context: "Context",
        task_instance: Union[
            "DataprocMetastoreCreateBackupOperator",
            "DataprocMetastoreCreateMetadataImportOperator",
        ],
        url: str,
        resource: str,
    ):
        """Push the URL template plus identifiers (including the specific
        resource name, e.g. a backup or import id) to XCom."""
        task_instance.xcom_push(
            context=context,
            key=DataprocMetastoreDetailedLink.key,
            value={
                "region": task_instance.region,
                "service_id": task_instance.service_id,
                "project_id": task_instance.project_id,
                "url": url,
                "resource": resource,
            },
        )

    def get_link(
        self,
        operator,
        dttm: Optional[datetime] = None,
        ti_key: Optional["TaskInstanceKey"] = None,
    ) -> str:
        """Return the formatted console URL, or "" when nothing was persisted."""
        if ti_key is not None:
            conf = XCom.get_value(key=self.key, ti_key=ti_key)
        else:
            # NOTE(review): assert used as control flow -- stripped under
            # `python -O`; an explicit None check may be safer. Confirm.
            assert dttm
            conf = XCom.get_one(
                dag_id=operator.dag.dag_id,
                task_id=operator.task_id,
                execution_date=dttm,
                key=DataprocMetastoreDetailedLink.key,
            )
        return (
            conf["url"].format(
                region=conf["region"],
                service_id=conf["service_id"],
                project_id=conf["project_id"],
                resource=conf["resource"],
            )
            if conf
            else ""
        )
class DataprocMetastoreCreateBackupOperator(BaseOperator):
    """
    Creates a new backup in a given project and location.

    :param project_id: Required. The ID of the Google Cloud project that the service belongs to.
    :param region: Required. The ID of the Google Cloud region that the service belongs to.
    :param service_id: Required. The ID of the metastore service, which is used as the final component of
        the metastore service's name. This value must be between 2 and 63 characters long inclusive, begin
        with a letter, end with a letter or number, and consist of alphanumeric ASCII characters or
        hyphens.

        This corresponds to the ``service_id`` field on the ``request`` instance; if ``request`` is
        provided, this should not be set.
    :param backup: Required. The backup to create. The ``name`` field is ignored. The ID of the created
        backup must be provided in the request's ``backup_id`` field.

        This corresponds to the ``backup`` field on the ``request`` instance; if ``request`` is provided, this
        should not be set.
    :param backup_id: Required. The ID of the backup, which is used as the final component of the backup's
        name. This value must be between 1 and 64 characters long, begin with a letter, end with a letter or
        number, and consist of alphanumeric ASCII characters or hyphens.

        This corresponds to the ``backup_id`` field on the ``request`` instance; if ``request`` is provided,
        this should not be set.
    :param request_id: Optional. A unique id used to identify the request.
    :param retry: Optional. Designation of what errors, if any, should be retried.
    :param timeout: Optional. The timeout for this request.
    :param metadata: Optional. Strings which should be sent along with the request as metadata.
    :param gcp_conn_id: The connection ID to use connecting to Google Cloud.
    :param impersonation_chain: Optional service account to impersonate using short-term
        credentials, or chained list of accounts required to get the access_token
        of the last account in the list, which will be impersonated in the request.
        If set as a string, the account must grant the originating account
        the Service Account Token Creator IAM role.
        If set as a sequence, the identities from the list must grant
        Service Account Token Creator IAM role to the directly preceding identity, with first
        account from the list granting this role to the originating account (templated).
    """

    template_fields: Sequence[str] = (
        'project_id',
        'backup',
        'impersonation_chain',
    )
    # Render the backup payload as JSON in the task UI.
    template_fields_renderers = {'backup': 'json'}
    operator_extra_links = (DataprocMetastoreDetailedLink(),)

    def __init__(
        self,
        *,
        project_id: str,
        region: str,
        service_id: str,
        backup: Union[Dict, Backup],
        backup_id: str,
        request_id: Optional[str] = None,
        retry: Union[Retry, _MethodDefault] = DEFAULT,
        timeout: Optional[float] = None,
        metadata: Sequence[Tuple[str, str]] = (),
        gcp_conn_id: str = "google_cloud_default",
        impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.project_id = project_id
        self.region = region
        self.service_id = service_id
        self.backup = backup
        self.backup_id = backup_id
        self.request_id = request_id
        self.retry = retry
        self.timeout = timeout
        self.metadata = metadata
        self.gcp_conn_id = gcp_conn_id
        self.impersonation_chain = impersonation_chain

    def execute(self, context: "Context") -> dict:
        """Create the backup (or fetch it if it already exists) and return it as a dict."""
        hook = DataprocMetastoreHook(
            gcp_conn_id=self.gcp_conn_id, impersonation_chain=self.impersonation_chain
        )
        self.log.info("Creating Dataproc Metastore backup: %s", self.backup_id)
        try:
            operation = hook.create_backup(
                project_id=self.project_id,
                region=self.region,
                service_id=self.service_id,
                backup=self.backup,
                backup_id=self.backup_id,
                request_id=self.request_id,
                retry=self.retry,
                timeout=self.timeout,
                metadata=self.metadata,
            )
            # Block until the long-running operation completes (bounded by self.timeout).
            backup = hook.wait_for_operation(self.timeout, operation)
            self.log.info("Backup %s created successfully", self.backup_id)
        except HttpError as err:
            # Idempotency: HTTP 409 means the backup already exists, so fetch it
            # instead of failing. Any other error is re-raised.
            if err.resp.status not in (409, '409'):
                raise
            self.log.info("Backup %s already exists", self.backup_id)
            backup = hook.get_backup(
                project_id=self.project_id,
                region=self.region,
                service_id=self.service_id,
                backup_id=self.backup_id,
                retry=self.retry,
                timeout=self.timeout,
                metadata=self.metadata,
            )
        DataprocMetastoreDetailedLink.persist(
            context=context, task_instance=self, url=METASTORE_BACKUP_LINK, resource=self.backup_id
        )
        return Backup.to_dict(backup)
class DataprocMetastoreCreateMetadataImportOperator(BaseOperator):
    """
    Creates a new MetadataImport in a given project and location.

    :param project_id: Required. The ID of the Google Cloud project that the service belongs to.
    :param region: Required. The ID of the Google Cloud region that the service belongs to.
    :param service_id: Required. The ID of the metastore service, which is used as the final component of
        the metastore service's name. This value must be between 2 and 63 characters long inclusive, begin
        with a letter, end with a letter or number, and consist of alphanumeric ASCII characters or
        hyphens.

        This corresponds to the ``service_id`` field on the ``request`` instance; if ``request`` is
        provided, this should not be set.
    :param metadata_import: Required. The metadata import to create. The ``name`` field is ignored. The ID of
        the created metadata import must be provided in the request's ``metadata_import_id`` field.

        This corresponds to the ``metadata_import`` field on the ``request`` instance; if ``request`` is
        provided, this should not be set.
    :param metadata_import_id: Required. The ID of the metadata import, which is used as the final component
        of the metadata import's name. This value must be between 1 and 64 characters long, begin with a
        letter, end with a letter or number, and consist of alphanumeric ASCII characters or hyphens.

        This corresponds to the ``metadata_import_id`` field on the ``request`` instance; if ``request`` is
        provided, this should not be set.
    :param request_id: Optional. A unique id used to identify the request.
    :param retry: Optional. Designation of what errors, if any, should be retried.
    :param timeout: Optional. The timeout for this request.
    :param metadata: Optional. Strings which should be sent along with the request as metadata.
    :param gcp_conn_id: The connection ID to use connecting to Google Cloud.
    :param impersonation_chain: Optional service account to impersonate using short-term
        credentials, or chained list of accounts required to get the access_token
        of the last account in the list, which will be impersonated in the request.
        If set as a string, the account must grant the originating account
        the Service Account Token Creator IAM role.
        If set as a sequence, the identities from the list must grant
        Service Account Token Creator IAM role to the directly preceding identity, with first
        account from the list granting this role to the originating account (templated).
    """

    template_fields: Sequence[str] = (
        'project_id',
        'metadata_import',
        'impersonation_chain',
    )
    # Render the metadata import payload as JSON in the task UI.
    template_fields_renderers = {'metadata_import': 'json'}
    operator_extra_links = (DataprocMetastoreDetailedLink(),)

    def __init__(
        self,
        *,
        project_id: str,
        region: str,
        service_id: str,
        metadata_import: MetadataImport,
        metadata_import_id: str,
        request_id: Optional[str] = None,
        retry: Union[Retry, _MethodDefault] = DEFAULT,
        timeout: Optional[float] = None,
        metadata: Sequence[Tuple[str, str]] = (),
        gcp_conn_id: str = "google_cloud_default",
        impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.project_id = project_id
        self.region = region
        self.service_id = service_id
        self.metadata_import = metadata_import
        self.metadata_import_id = metadata_import_id
        self.request_id = request_id
        self.retry = retry
        self.timeout = timeout
        self.metadata = metadata
        self.gcp_conn_id = gcp_conn_id
        self.impersonation_chain = impersonation_chain

    def execute(self, context: "Context") -> dict:
        """Create the metadata import and return it as a dict."""
        hook = DataprocMetastoreHook(
            gcp_conn_id=self.gcp_conn_id, impersonation_chain=self.impersonation_chain
        )
        self.log.info("Creating Dataproc Metastore metadata import: %s", self.metadata_import_id)
        operation = hook.create_metadata_import(
            project_id=self.project_id,
            region=self.region,
            service_id=self.service_id,
            metadata_import=self.metadata_import,
            metadata_import_id=self.metadata_import_id,
            request_id=self.request_id,
            retry=self.retry,
            timeout=self.timeout,
            metadata=self.metadata,
        )
        # Block until the long-running operation completes (bounded by self.timeout).
        metadata_import = hook.wait_for_operation(self.timeout, operation)
        self.log.info("Metadata import %s created successfully", self.metadata_import_id)
        DataprocMetastoreDetailedLink.persist(
            context=context, task_instance=self, url=METASTORE_IMPORT_LINK, resource=self.metadata_import_id
        )
        return MetadataImport.to_dict(metadata_import)
class DataprocMetastoreCreateServiceOperator(BaseOperator):
    """
    Creates a metastore service in a project and location.

    :param region: Required. The ID of the Google Cloud region that the service belongs to.
    :param project_id: Required. The ID of the Google Cloud project that the service belongs to.
    :param service: Required. The Metastore service to create. The ``name`` field is ignored. The ID of
        the created metastore service must be provided in the request's ``service_id`` field.

        This corresponds to the ``service`` field on the ``request`` instance; if ``request`` is provided,
        this should not be set.
    :param service_id: Required. The ID of the metastore service, which is used as the final component of
        the metastore service's name. This value must be between 2 and 63 characters long inclusive, begin
        with a letter, end with a letter or number, and consist of alphanumeric ASCII characters or
        hyphens.

        This corresponds to the ``service_id`` field on the ``request`` instance; if ``request`` is
        provided, this should not be set.
    :param request_id: Optional. A unique id used to identify the request.
    :param retry: Designation of what errors, if any, should be retried.
    :param timeout: The timeout for this request.
    :param metadata: Strings which should be sent along with the request as metadata.
    :param gcp_conn_id: The connection ID to use connecting to Google Cloud.
    :param impersonation_chain: Optional service account to impersonate using short-term
        credentials, or chained list of accounts required to get the access_token
        of the last account in the list, which will be impersonated in the request.
        If set as a string, the account must grant the originating account
        the Service Account Token Creator IAM role.
        If set as a sequence, the identities from the list must grant
        Service Account Token Creator IAM role to the directly preceding identity, with first
        account from the list granting this role to the originating account (templated).
    """

    template_fields: Sequence[str] = (
        'project_id',
        'service',
        'impersonation_chain',
    )
    # Render the service payload as JSON in the task UI.
    template_fields_renderers = {'service': 'json'}
    operator_extra_links = (DataprocMetastoreLink(),)

    def __init__(
        self,
        *,
        region: str,
        project_id: str,
        service: Union[Dict, Service],
        service_id: str,
        request_id: Optional[str] = None,
        retry: Union[Retry, _MethodDefault] = DEFAULT,
        timeout: Optional[float] = None,
        metadata: Sequence[Tuple[str, str]] = (),
        gcp_conn_id: str = "google_cloud_default",
        impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.region = region
        self.project_id = project_id
        self.service = service
        self.service_id = service_id
        self.request_id = request_id
        self.retry = retry
        self.timeout = timeout
        self.metadata = metadata
        self.gcp_conn_id = gcp_conn_id
        self.impersonation_chain = impersonation_chain

    def execute(self, context: "Context") -> dict:
        """Create the metastore service (or fetch it if it already exists) and return it as a dict."""
        hook = DataprocMetastoreHook(
            gcp_conn_id=self.gcp_conn_id, impersonation_chain=self.impersonation_chain
        )
        self.log.info("Creating Dataproc Metastore service: %s", self.project_id)
        try:
            operation = hook.create_service(
                region=self.region,
                project_id=self.project_id,
                service=self.service,
                service_id=self.service_id,
                request_id=self.request_id,
                retry=self.retry,
                timeout=self.timeout,
                metadata=self.metadata,
            )
            # Block until the long-running operation completes (bounded by self.timeout).
            service = hook.wait_for_operation(self.timeout, operation)
            self.log.info("Service %s created successfully", self.service_id)
        except HttpError as err:
            # Idempotency: HTTP 409 means the service already exists, so fetch it
            # instead of failing. Any other error is re-raised.
            if err.resp.status not in (409, '409'):
                raise
            # Fixed: this operator creates a *service*, not an instance
            # (message was copy-pasted from another operator).
            self.log.info("Service %s already exists", self.service_id)
            service = hook.get_service(
                region=self.region,
                project_id=self.project_id,
                service_id=self.service_id,
                retry=self.retry,
                timeout=self.timeout,
                metadata=self.metadata,
            )
        DataprocMetastoreLink.persist(context=context, task_instance=self, url=METASTORE_SERVICE_LINK)
        return Service.to_dict(service)
class DataprocMetastoreDeleteBackupOperator(BaseOperator):
    """
    Deletes a single backup.

    :param project_id: Required. The ID of the Google Cloud project that the backup belongs to.
    :param region: Required. The ID of the Google Cloud region that the backup belongs to.
    :param service_id: Required. The ID of the metastore service, which is used as the final component of
        the metastore service's name. This value must be between 2 and 63 characters long inclusive, begin
        with a letter, end with a letter or number, and consist of alphanumeric ASCII characters or
        hyphens.

        This corresponds to the ``service_id`` field on the ``request`` instance; if ``request`` is
        provided, this should not be set.
    :param backup_id: Required. The ID of the backup, which is used as the final component of the backup's
        name. This value must be between 1 and 64 characters long, begin with a letter, end with a letter or
        number, and consist of alphanumeric ASCII characters or hyphens.

        This corresponds to the ``backup_id`` field on the ``request`` instance; if ``request`` is provided,
        this should not be set.
    :param request_id: Optional. A unique id used to identify the request.
    :param retry: Optional. Designation of what errors, if any, should be retried.
    :param timeout: Optional. The timeout for this request.
    :param metadata: Optional. Strings which should be sent along with the request as metadata.
    :param gcp_conn_id: The connection ID to use connecting to Google Cloud.
    :param impersonation_chain: Optional service account to impersonate using short-term
        credentials, or chained list of accounts required to get the access_token
        of the last account in the list, which will be impersonated in the request.
        If set as a string, the account must grant the originating account
        the Service Account Token Creator IAM role.
        If set as a sequence, the identities from the list must grant
        Service Account Token Creator IAM role to the directly preceding identity, with first
        account from the list granting this role to the originating account (templated).
    """

    template_fields: Sequence[str] = (
        'project_id',
        'impersonation_chain',
    )

    def __init__(
        self,
        *,
        project_id: str,
        region: str,
        service_id: str,
        backup_id: str,
        request_id: Optional[str] = None,
        retry: Union[Retry, _MethodDefault] = DEFAULT,
        timeout: Optional[float] = None,
        metadata: Sequence[Tuple[str, str]] = (),
        gcp_conn_id: str = "google_cloud_default",
        impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.project_id = project_id
        self.region = region
        self.service_id = service_id
        self.backup_id = backup_id
        self.request_id = request_id
        self.retry = retry
        self.timeout = timeout
        self.metadata = metadata
        self.gcp_conn_id = gcp_conn_id
        self.impersonation_chain = impersonation_chain

    def execute(self, context: "Context") -> None:
        """Delete the backup and wait for the long-running operation to finish."""
        hook = DataprocMetastoreHook(
            gcp_conn_id=self.gcp_conn_id, impersonation_chain=self.impersonation_chain
        )
        self.log.info("Deleting Dataproc Metastore backup: %s", self.backup_id)
        operation = hook.delete_backup(
            project_id=self.project_id,
            region=self.region,
            service_id=self.service_id,
            backup_id=self.backup_id,
            request_id=self.request_id,
            retry=self.retry,
            timeout=self.timeout,
            metadata=self.metadata,
        )
        # Block until the long-running operation completes (bounded by self.timeout).
        hook.wait_for_operation(self.timeout, operation)
        # Fixed: log the backup id that was deleted (previously logged the project id).
        self.log.info("Backup %s deleted successfully", self.backup_id)
class DataprocMetastoreDeleteServiceOperator(BaseOperator):
    """
    Deletes a single service.

    :param region: Required. The ID of the Google Cloud region that the service belongs to.
    :param project_id: Required. The ID of the Google Cloud project that the service belongs to.
    :param service_id: Required. The ID of the metastore service, which is used as the final component
        of the metastore service's name.
    :param retry: Designation of what errors, if any, should be retried.
    :param timeout: The timeout for this request.
    :param metadata: Strings which should be sent along with the request as metadata.
    :param gcp_conn_id: The connection ID to use connecting to Google Cloud.
    :param impersonation_chain: Optional service account to impersonate using short-term
        credentials, or chained list of accounts required to get the access_token
        of the last account in the list, which will be impersonated in the request.
        If set as a string, the account must grant the originating account
        the Service Account Token Creator IAM role.
        If set as a sequence, the identities from the list must grant
        Service Account Token Creator IAM role to the directly preceding identity, with first
        account from the list granting this role to the originating account (templated).
    """

    template_fields: Sequence[str] = (
        'project_id',
        'impersonation_chain',
    )

    def __init__(
        self,
        *,
        region: str,
        project_id: str,
        service_id: str,
        retry: Union[Retry, _MethodDefault] = DEFAULT,
        timeout: Optional[float] = None,
        metadata: Sequence[Tuple[str, str]] = (),
        gcp_conn_id: str = "google_cloud_default",
        impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.region = region
        self.project_id = project_id
        self.service_id = service_id
        self.retry = retry
        self.timeout = timeout
        self.metadata = metadata
        self.gcp_conn_id = gcp_conn_id
        self.impersonation_chain = impersonation_chain

    def execute(self, context: "Context") -> None:
        """Delete the metastore service and wait for the long-running operation to finish."""
        hook = DataprocMetastoreHook(
            gcp_conn_id=self.gcp_conn_id, impersonation_chain=self.impersonation_chain
        )
        # Fixed: log the service id being deleted (previously logged the project id).
        self.log.info("Deleting Dataproc Metastore service: %s", self.service_id)
        operation = hook.delete_service(
            region=self.region,
            project_id=self.project_id,
            service_id=self.service_id,
            retry=self.retry,
            timeout=self.timeout,
            metadata=self.metadata,
        )
        # Block until the long-running operation completes (bounded by self.timeout).
        hook.wait_for_operation(self.timeout, operation)
        # Fixed: log the service id that was deleted (previously logged the project id).
        self.log.info("Service %s deleted successfully", self.service_id)
class DataprocMetastoreExportMetadataOperator(BaseOperator):
    """
    Exports metadata from a service.

    :param destination_gcs_folder: A Cloud Storage URI of a folder, in the format
        ``gs://<bucket_name>/<path_inside_bucket>``. A sub-folder
        ``<export_folder>`` containing exported files will be
        created below it.
    :param project_id: Required. The ID of the Google Cloud project that the service belongs to.
    :param region: Required. The ID of the Google Cloud region that the service belongs to.
    :param service_id: Required. The ID of the metastore service, which is used as the final component of
        the metastore service's name. This value must be between 2 and 63 characters long inclusive, begin
        with a letter, end with a letter or number, and consist of alphanumeric ASCII characters or
        hyphens.

        This corresponds to the ``service_id`` field on the ``request`` instance; if ``request`` is
        provided, this should not be set.
    :param request_id: Optional. A unique id used to identify the request.
    :param database_dump_type: Optional. The type of the database dump.
    :param retry: Optional. Designation of what errors, if any, should be retried.
    :param timeout: Optional. The timeout for this request.
    :param metadata: Optional. Strings which should be sent along with the request as metadata.
    :param gcp_conn_id: The connection ID to use connecting to Google Cloud.
    :param impersonation_chain: Optional service account to impersonate using short-term
        credentials, or chained list of accounts required to get the access_token
        of the last account in the list, which will be impersonated in the request.
        If set as a string, the account must grant the originating account
        the Service Account Token Creator IAM role.
        If set as a sequence, the identities from the list must grant
        Service Account Token Creator IAM role to the directly preceding identity, with first
        account from the list granting this role to the originating account (templated).
    """

    template_fields: Sequence[str] = (
        'project_id',
        'impersonation_chain',
    )
    operator_extra_links = (DataprocMetastoreLink(), StorageLink())

    def __init__(
        self,
        *,
        destination_gcs_folder: str,
        project_id: str,
        region: str,
        service_id: str,
        request_id: Optional[str] = None,
        database_dump_type: Optional[DatabaseDumpSpec] = None,
        retry: Union[Retry, _MethodDefault] = DEFAULT,
        timeout: Optional[float] = None,
        metadata: Sequence[Tuple[str, str]] = (),
        gcp_conn_id: str = "google_cloud_default",
        impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.destination_gcs_folder = destination_gcs_folder
        self.project_id = project_id
        self.region = region
        self.service_id = service_id
        self.request_id = request_id
        self.database_dump_type = database_dump_type
        self.retry = retry
        self.timeout = timeout
        self.metadata = metadata
        self.gcp_conn_id = gcp_conn_id
        self.impersonation_chain = impersonation_chain

    def execute(self, context: "Context") -> dict:
        """Start the metadata export, poll until it reaches a terminal state, and return it as a dict."""
        hook = DataprocMetastoreHook(
            gcp_conn_id=self.gcp_conn_id, impersonation_chain=self.impersonation_chain
        )
        self.log.info("Exporting metadata from Dataproc Metastore service: %s", self.service_id)
        hook.export_metadata(
            destination_gcs_folder=self.destination_gcs_folder,
            project_id=self.project_id,
            region=self.region,
            service_id=self.service_id,
            request_id=self.request_id,
            database_dump_type=self.database_dump_type,
            retry=self.retry,
            timeout=self.timeout,
            metadata=self.metadata,
        )
        # The SDK operation result cannot be parsed into MetadataExport, so poll
        # the service's metadata_management_activity instead (see the helper below).
        metadata_export = self._wait_for_export_metadata(hook)
        self.log.info("Metadata from service %s exported successfully", self.service_id)
        DataprocMetastoreLink.persist(context=context, task_instance=self, url=METASTORE_EXPORT_LINK)
        uri = self._get_uri_from_destination(MetadataExport.to_dict(metadata_export)["destination_gcs_uri"])
        StorageLink.persist(context=context, task_instance=self, uri=uri, project_id=self.project_id)
        return MetadataExport.to_dict(metadata_export)

    def _get_uri_from_destination(self, destination_uri: str) -> str:
        # Strip the leading "gs://" scheme so the URI can be used as a Storage link path.
        return destination_uri[5:] if destination_uri.startswith("gs://") else destination_uri

    def _wait_for_export_metadata(self, hook: DataprocMetastoreHook):
        """
        Workaround to check that export was created successfully.
        We discovered an issue parsing the result to MetadataExport inside the SDK.

        Polls the service (with exponential backoff, capped at 120s between polls)
        until the most recent export reaches SUCCEEDED or FAILED. NOTE(review):
        the sleep generator is unbounded, so there is no overall deadline here.
        """
        for time_to_wait in exponential_sleep_generator(initial=10, maximum=120):
            sleep(time_to_wait)
            service = hook.get_service(
                region=self.region,
                project_id=self.project_id,
                service_id=self.service_id,
                retry=self.retry,
                timeout=self.timeout,
                metadata=self.metadata,
            )
            # The most recent export is expected at index 0 of metadata_exports.
            activities: MetadataManagementActivity = service.metadata_management_activity
            metadata_export: MetadataExport = activities.metadata_exports[0]
            if metadata_export.state == MetadataExport.State.SUCCEEDED:
                return metadata_export
            if metadata_export.state == MetadataExport.State.FAILED:
                raise AirflowException(
                    f"Exporting metadata from Dataproc Metastore {metadata_export.name} FAILED"
                )
class DataprocMetastoreGetServiceOperator(BaseOperator):
    """
    Fetches the details of a single Dataproc Metastore service.

    :param region: Required. The ID of the Google Cloud region that the service belongs to.
    :param project_id: Required. The ID of the Google Cloud project that the service belongs to.
    :param service_id: Required. The ID of the metastore service; used as the final component of the
        service's resource name (between 2 and 63 characters inclusive, beginning with a letter,
        ending with a letter or number, alphanumeric ASCII characters or hyphens only).
    :param retry: Designation of which errors, if any, should be retried.
    :param timeout: The timeout for this request.
    :param metadata: Additional strings sent along with the request as metadata.
    :param gcp_conn_id: The connection ID used to connect to Google Cloud.
    :param impersonation_chain: Optional service account (or chained list of accounts) to
        impersonate using short-term credentials. When a string, that account must grant the
        originating account the Service Account Token Creator IAM role; when a sequence, each
        identity must grant that role to the directly preceding one, with the first account
        granting it to the originating account (templated).
    """

    template_fields: Sequence[str] = (
        'project_id',
        'impersonation_chain',
    )
    operator_extra_links = (DataprocMetastoreLink(),)

    def __init__(
        self,
        *,
        region: str,
        project_id: str,
        service_id: str,
        retry: Union[Retry, _MethodDefault] = DEFAULT,
        timeout: Optional[float] = None,
        metadata: Sequence[Tuple[str, str]] = (),
        gcp_conn_id: str = "google_cloud_default",
        impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        # Target service coordinates.
        self.project_id = project_id
        self.region = region
        self.service_id = service_id
        # Per-request tuning.
        self.retry = retry
        self.timeout = timeout
        self.metadata = metadata
        # Connection / credentials.
        self.gcp_conn_id = gcp_conn_id
        self.impersonation_chain = impersonation_chain

    def execute(self, context: "Context") -> dict:
        """Fetch the service, persist its console link, and return it as a dict."""
        hook = DataprocMetastoreHook(
            gcp_conn_id=self.gcp_conn_id, impersonation_chain=self.impersonation_chain
        )
        self.log.info("Gets the details of a single Dataproc Metastore service: %s", self.project_id)
        service = hook.get_service(
            region=self.region,
            project_id=self.project_id,
            service_id=self.service_id,
            retry=self.retry,
            timeout=self.timeout,
            metadata=self.metadata,
        )
        DataprocMetastoreLink.persist(context=context, task_instance=self, url=METASTORE_SERVICE_LINK)
        return Service.to_dict(service)
class DataprocMetastoreListBackupsOperator(BaseOperator):
    """
    Lists backups in a service.

    :param project_id: Required. The ID of the Google Cloud project that the backup belongs to.
    :param region: Required. The ID of the Google Cloud region that the backup belongs to.
    :param service_id: Required. The ID of the metastore service, which is used as the final component of
        the metastore service's name. This value must be between 2 and 63 characters long inclusive, begin
        with a letter, end with a letter or number, and consist of alphanumeric ASCII characters or
        hyphens.

        This corresponds to the ``service_id`` field on the ``request`` instance; if ``request`` is
        provided, this should not be set.
    :param page_size: Optional. The maximum number of backups to return per page.
    :param page_token: Optional. A page token received from a previous listing call.
    :param filter: Optional. A filter expression to restrict the backups returned.
    :param order_by: Optional. The field(s) to sort the results by.
    :param retry: Optional. Designation of what errors, if any, should be retried.
    :param timeout: Optional. The timeout for this request.
    :param metadata: Optional. Strings which should be sent along with the request as metadata.
    :param gcp_conn_id: The connection ID to use connecting to Google Cloud.
    :param impersonation_chain: Optional service account to impersonate using short-term
        credentials, or chained list of accounts required to get the access_token
        of the last account in the list, which will be impersonated in the request.
        If set as a string, the account must grant the originating account
        the Service Account Token Creator IAM role.
        If set as a sequence, the identities from the list must grant
        Service Account Token Creator IAM role to the directly preceding identity, with first
        account from the list granting this role to the originating account (templated).
    """

    template_fields: Sequence[str] = (
        'project_id',
        'impersonation_chain',
    )
    operator_extra_links = (DataprocMetastoreLink(),)

    def __init__(
        self,
        *,
        project_id: str,
        region: str,
        service_id: str,
        page_size: Optional[int] = None,
        page_token: Optional[str] = None,
        filter: Optional[str] = None,
        order_by: Optional[str] = None,
        retry: Union[Retry, _MethodDefault] = DEFAULT,
        timeout: Optional[float] = None,
        metadata: Sequence[Tuple[str, str]] = (),
        gcp_conn_id: str = "google_cloud_default",
        impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.project_id = project_id
        self.region = region
        self.service_id = service_id
        self.page_size = page_size
        self.page_token = page_token
        self.filter = filter
        self.order_by = order_by
        self.retry = retry
        self.timeout = timeout
        self.metadata = metadata
        self.gcp_conn_id = gcp_conn_id
        self.impersonation_chain = impersonation_chain

    def execute(self, context: "Context") -> List[dict]:
        """List the service's backups and return them as a list of dicts."""
        hook = DataprocMetastoreHook(
            gcp_conn_id=self.gcp_conn_id, impersonation_chain=self.impersonation_chain
        )
        self.log.info("Listing Dataproc Metastore backups: %s", self.service_id)
        backups = hook.list_backups(
            project_id=self.project_id,
            region=self.region,
            service_id=self.service_id,
            page_size=self.page_size,
            page_token=self.page_token,
            filter=self.filter,
            order_by=self.order_by,
            retry=self.retry,
            timeout=self.timeout,
            metadata=self.metadata,
        )
        DataprocMetastoreLink.persist(context=context, task_instance=self, url=METASTORE_BACKUPS_LINK)
        return [Backup.to_dict(backup) for backup in backups]
class DataprocMetastoreRestoreServiceOperator(BaseOperator):
    """
    Restores a service from a backup.

    :param project_id: Required. The ID of the Google Cloud project that the service belongs to.
    :param region: Required. The ID of the Google Cloud region that the service belongs to.
    :param service_id: Required. The ID of the metastore service, which is used as the final component of
        the metastore service's name. This value must be between 2 and 63 characters long inclusive, begin
        with a letter, end with a letter or number, and consist of alphanumeric ASCII characters or
        hyphens.

        This corresponds to the ``service_id`` field on the ``request`` instance; if ``request`` is
        provided, this should not be set.
    :param backup_project_id: Required. The ID of the Google Cloud project that the metastore
        service backup to restore from.
    :param backup_region: Required. The ID of the Google Cloud region that the metastore
        service backup to restore from.
    :param backup_service_id: Required. The ID of the metastore service backup to restore from, which is
        used as the final component of the metastore service's name. This value must be between 2 and 63
        characters long inclusive, begin with a letter, end with a letter or number, and consist
        of alphanumeric ASCII characters or hyphens.
    :param backup_id: Required. The ID of the metastore service backup to restore from
    :param restore_type: Optional. The type of restore. If unspecified, defaults to
        ``METADATA_ONLY``
    :param request_id: Optional. A unique id used to identify the request.
    :param retry: Optional. Designation of what errors, if any, should be retried.
    :param timeout: Optional. The timeout for this request.
    :param metadata: Optional. Strings which should be sent along with the request as metadata.
    :param gcp_conn_id: The connection ID to use connecting to Google Cloud.
    :param impersonation_chain: Optional service account to impersonate using short-term
        credentials, or chained list of accounts required to get the access_token
        of the last account in the list, which will be impersonated in the request.
        If set as a string, the account must grant the originating account
        the Service Account Token Creator IAM role.
        If set as a sequence, the identities from the list must grant
        Service Account Token Creator IAM role to the directly preceding identity, with first
        account from the list granting this role to the originating account (templated).
    """

    template_fields: Sequence[str] = (
        'project_id',
        'impersonation_chain',
    )
    operator_extra_links = (DataprocMetastoreLink(),)

    def __init__(
        self,
        *,
        project_id: str,
        region: str,
        service_id: str,
        backup_project_id: str,
        backup_region: str,
        backup_service_id: str,
        backup_id: str,
        restore_type: Optional[Restore] = None,
        request_id: Optional[str] = None,
        retry: Union[Retry, _MethodDefault] = DEFAULT,
        timeout: Optional[float] = None,
        metadata: Sequence[Tuple[str, str]] = (),
        gcp_conn_id: str = "google_cloud_default",
        impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.project_id = project_id
        self.region = region
        self.service_id = service_id
        self.backup_project_id = backup_project_id
        self.backup_region = backup_region
        self.backup_service_id = backup_service_id
        self.backup_id = backup_id
        self.restore_type = restore_type
        self.request_id = request_id
        self.retry = retry
        self.timeout = timeout
        self.metadata = metadata
        self.gcp_conn_id = gcp_conn_id
        self.impersonation_chain = impersonation_chain

    def execute(self, context: "Context") -> None:
        """Start the restore and poll until it reaches a terminal state."""
        hook = DataprocMetastoreHook(
            gcp_conn_id=self.gcp_conn_id, impersonation_chain=self.impersonation_chain
        )
        self.log.info(
            "Restoring Dataproc Metastore service: %s from backup: %s", self.service_id, self.backup_id
        )
        hook.restore_service(
            project_id=self.project_id,
            region=self.region,
            service_id=self.service_id,
            backup_project_id=self.backup_project_id,
            backup_region=self.backup_region,
            backup_service_id=self.backup_service_id,
            backup_id=self.backup_id,
            restore_type=self.restore_type,
            request_id=self.request_id,
            retry=self.retry,
            timeout=self.timeout,
            metadata=self.metadata,
        )
        # The SDK operation result cannot be parsed into Restore, so poll the
        # service's metadata_management_activity instead (see the helper below).
        self._wait_for_restore_service(hook)
        self.log.info("Service %s restored from backup %s", self.service_id, self.backup_id)
        DataprocMetastoreLink.persist(context=context, task_instance=self, url=METASTORE_SERVICE_LINK)

    def _wait_for_restore_service(self, hook: DataprocMetastoreHook):
        """
        Workaround to check that restore service was finished successfully.
        We discovered an issue to parse result to Restore inside the SDK.

        Polls the service (with exponential backoff, capped at 120s between polls)
        until the most recent restore reaches SUCCEEDED or FAILED. NOTE(review):
        the sleep generator is unbounded, so there is no overall deadline here.
        """
        for time_to_wait in exponential_sleep_generator(initial=10, maximum=120):
            sleep(time_to_wait)
            service = hook.get_service(
                region=self.region,
                project_id=self.project_id,
                service_id=self.service_id,
                retry=self.retry,
                timeout=self.timeout,
                metadata=self.metadata,
            )
            # The most recent restore is expected at index 0 of restores.
            activities: MetadataManagementActivity = service.metadata_management_activity
            restore_service: Restore = activities.restores[0]
            if restore_service.state == Restore.State.SUCCEEDED:
                return restore_service
            if restore_service.state == Restore.State.FAILED:
                raise AirflowException("Restoring service FAILED")
class DataprocMetastoreUpdateServiceOperator(BaseOperator):
    """
    Updates the parameters of a single service.

    :param project_id: Required. The ID of the Google Cloud project that the service belongs to.
    :param region: Required. The ID of the Google Cloud region that the service belongs to.
    :param service_id: Required. The ID of the metastore service, which is used as the final component of
        the metastore service's name. This value must be between 2 and 63 characters long inclusive, begin
        with a letter, end with a letter or number, and consist of alphanumeric ASCII characters or
        hyphens.
        This corresponds to the ``service_id`` field on the ``request`` instance; if ``request`` is
        provided, this should not be set.
    :param service: Required. The metastore service to update. The server only merges fields in the service
        if they are specified in ``update_mask``.
        The metastore service's ``name`` field is used to identify the metastore service to be updated.
        This corresponds to the ``service`` field on the ``request`` instance; if ``request`` is provided,
        this should not be set.
    :param update_mask: Required. A field mask used to specify the fields to be overwritten in the metastore
        service resource by the update. Fields specified in the ``update_mask`` are relative to the resource
        (not to the full request). A field is overwritten if it is in the mask.
        This corresponds to the ``update_mask`` field on the ``request`` instance; if ``request`` is provided,
        this should not be set.
    :param request_id: Optional. A unique id used to identify the request.
    :param retry: Optional. Designation of what errors, if any, should be retried.
    :param timeout: Optional. The timeout for this request.
    :param metadata: Optional. Strings which should be sent along with the request as metadata.
    :param gcp_conn_id: The connection ID to use connecting to Google Cloud.
    :param impersonation_chain: Optional service account to impersonate using short-term
        credentials, or chained list of accounts required to get the access_token
        of the last account in the list, which will be impersonated in the request.
        If set as a string, the account must grant the originating account
        the Service Account Token Creator IAM role.
        If set as a sequence, the identities from the list must grant
        Service Account Token Creator IAM role to the directly preceding identity, with first
        account from the list granting this role to the originating account (templated).
    """

    template_fields: Sequence[str] = (
        'project_id',
        'impersonation_chain',
    )
    operator_extra_links = (DataprocMetastoreLink(),)

    def __init__(
        self,
        *,
        project_id: str,
        region: str,
        service_id: str,
        service: Union[Dict, Service],
        update_mask: FieldMask,
        request_id: Optional[str] = None,
        retry: Union[Retry, _MethodDefault] = DEFAULT,
        timeout: Optional[float] = None,
        metadata: Sequence[Tuple[str, str]] = (),
        gcp_conn_id: str = "google_cloud_default",
        impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.project_id = project_id
        self.region = region
        self.service_id = service_id
        self.service = service
        self.update_mask = update_mask
        self.request_id = request_id
        self.retry = retry
        self.timeout = timeout
        self.metadata = metadata
        self.gcp_conn_id = gcp_conn_id
        self.impersonation_chain = impersonation_chain

    def _service_display_name(self) -> Optional[str]:
        """Service name for log messages.

        ``self.service`` is declared ``Union[Dict, Service]``; the previous code
        unconditionally called ``.get("name")``, which raises ``AttributeError``
        when a ``Service`` proto message is passed instead of a dict.
        """
        if isinstance(self.service, dict):
            return self.service.get("name")
        return getattr(self.service, "name", None)

    def execute(self, context: "Context"):
        hook = DataprocMetastoreHook(
            gcp_conn_id=self.gcp_conn_id, impersonation_chain=self.impersonation_chain
        )
        self.log.info("Updating Dataproc Metastore service: %s", self._service_display_name())
        operation = hook.update_service(
            project_id=self.project_id,
            region=self.region,
            service_id=self.service_id,
            service=self.service,
            update_mask=self.update_mask,
            request_id=self.request_id,
            retry=self.retry,
            timeout=self.timeout,
            metadata=self.metadata,
        )
        # Block until the long-running update operation completes.
        hook.wait_for_operation(self.timeout, operation)
        self.log.info("Service %s updated successfully", self._service_display_name())
        DataprocMetastoreLink.persist(context=context, task_instance=self, url=METASTORE_SERVICE_LINK)
|
{
"content_hash": "33d03d1cb1df579a7ffd632fab4e7628",
"timestamp": "",
"source": "github",
"line_count": 1108,
"max_line_length": 110,
"avg_line_length": 44.72563176895307,
"alnum_prop": 0.6471264831705545,
"repo_name": "Acehaidrey/incubator-airflow",
"id": "4bdf519d2f5b548a184e80260aaa0dab8a58e151",
"size": "50345",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "airflow/providers/google/cloud/operators/dataproc_metastore.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "25785"
},
{
"name": "Dockerfile",
"bytes": "76693"
},
{
"name": "HCL",
"bytes": "3786"
},
{
"name": "HTML",
"bytes": "164512"
},
{
"name": "JavaScript",
"bytes": "236992"
},
{
"name": "Jinja",
"bytes": "37155"
},
{
"name": "Jupyter Notebook",
"bytes": "2929"
},
{
"name": "Mako",
"bytes": "1339"
},
{
"name": "Python",
"bytes": "21727510"
},
{
"name": "R",
"bytes": "313"
},
{
"name": "Shell",
"bytes": "495253"
},
{
"name": "TypeScript",
"bytes": "326556"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
determine_ext,
ExtractorError,
qualities,
)
class TeacherTubeIE(InfoExtractor):
    """Extractor for single teachertube.com video/audio pages."""
    IE_NAME = 'teachertube'
    IE_DESC = 'teachertube.com videos'

    _VALID_URL = r'https?://(?:www\.)?teachertube\.com/(viewVideo\.php\?video_id=|music\.php\?music_id=|video/(?:[\da-z-]+-)?|audio/)(?P<id>\d+)'

    _TESTS = [{
        # flowplayer
        'url': 'http://www.teachertube.com/viewVideo.php?video_id=339997',
        'md5': 'f9434ef992fd65936d72999951ee254c',
        'info_dict': {
            'id': '339997',
            'ext': 'mp4',
            'title': 'Measures of dispersion from a frequency table',
            'description': 'Measures of dispersion from a frequency table',
            'thumbnail': r're:https?://.*\.(?:jpg|png)',
        },
    }, {
        # jwplayer
        'url': 'http://www.teachertube.com/music.php?music_id=8805',
        'md5': '01e8352006c65757caf7b961f6050e21',
        'info_dict': {
            'id': '8805',
            'ext': 'mp3',
            'title': 'PER ASPERA AD ASTRA',
            'description': 'RADIJSKA EMISIJA ZRAKOPLOVNE TEHNI?KE ?KOLE P',
        },
    }, {
        # unavailable video
        'url': 'http://www.teachertube.com/video/intro-video-schleicher-297790',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)

        # Removed media is rendered as an error box instead of a player.
        error = self._search_regex(
            r'<div\b[^>]+\bclass=["\']msgBox error[^>]+>([^<]+)', webpage,
            'error', default=None)
        if error:
            raise ExtractorError('%s said: %s' % (self.IE_NAME, error), expected=True)

        title = self._html_search_meta('title', webpage, 'title', fatal=True)
        TITLE_SUFFIX = ' - TeacherTube'
        if title.endswith(TITLE_SUFFIX):
            title = title[:-len(TITLE_SUFFIX)].strip()

        description = self._html_search_meta('description', webpage, 'description')
        if description:
            description = description.strip()

        prefer = qualities(['mp3', 'flv', 'mp4'])

        # Media URLs may come from any of three player setups.
        media_url_res = (
            r'data-contenturl="([^"]+)"',
            r'var\s+filePath\s*=\s*"([^"]+)"',
            r'\'file\'\s*:\s*["\']([^"\']+)["\'],',
        )
        media_urls = set()
        for media_url_re in media_url_res:
            media_urls.update(re.findall(media_url_re, webpage))

        formats = []
        for media_url in media_urls:
            formats.append({
                'url': media_url,
                'quality': prefer(determine_ext(media_url)),
            })
        self._sort_formats(formats)

        thumbnail = self._og_search_thumbnail(
            webpage, default=None) or self._html_search_meta(
            'thumbnail', webpage)

        return {
            'id': video_id,
            'title': title,
            'description': description,
            'thumbnail': thumbnail,
            'formats': formats,
        }
class TeacherTubeUserIE(InfoExtractor):
    """Extractor for teachertube.com user profiles and collections (playlists)."""
    IE_NAME = 'teachertube:user:collection'
    IE_DESC = 'teachertube.com user and collection videos'

    _VALID_URL = r'https?://(?:www\.)?teachertube\.com/(user/profile|collection)/(?P<user>[0-9a-zA-Z]+)/?'

    _MEDIA_RE = r'''(?sx)
        class="?sidebar_thumb_time"?>[0-9:]+</div>
        \s*
        <a\s+href="(https?://(?:www\.)?teachertube\.com/(?:video|audio)/[^"]+)"
    '''
    _TEST = {
        'url': 'http://www.teachertube.com/user/profile/rbhagwati2',
        'info_dict': {
            'id': 'rbhagwati2'
        },
        'playlist_mincount': 179,
    }

    def _real_extract(self, url):
        user_id = re.match(self._VALID_URL, url).group('user')

        webpage = self._download_webpage(url, user_id)
        media_urls = re.findall(self._MEDIA_RE, webpage)

        # The last page number in the AJAX pager duplicates the first page.
        pages = re.findall(r'/ajax-user/user-videos/%s\?page=([0-9]+)' % user_id, webpage)[:-1]
        for page_num in pages:
            page_url = 'http://www.teachertube.com/ajax-user/user-videos/%s?page=%s' % (user_id, page_num)
            page = self._download_webpage(
                page_url, user_id, 'Downloading page %s/%s' % (page_num, len(pages)))
            media_urls.extend(re.findall(self._MEDIA_RE, page))

        entries = [self.url_result(media_url, 'TeacherTube') for media_url in media_urls]
        return self.playlist_result(entries, user_id)
|
{
"content_hash": "710236b39a7232c8ae419d7d89a8876f",
"timestamp": "",
"source": "github",
"line_count": 128,
"max_line_length": 145,
"avg_line_length": 34.3828125,
"alnum_prop": 0.5480572597137015,
"repo_name": "aboutsajjad/Bridge",
"id": "1272078c50b8703aa906e055fc57842078542e95",
"size": "4417",
"binary": false,
"copies": "28",
"ref": "refs/heads/master",
"path": "app_packages/youtube_dl/extractor/teachertube.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "2532435"
},
{
"name": "C++",
"bytes": "338713"
},
{
"name": "CSS",
"bytes": "6270"
},
{
"name": "JavaScript",
"bytes": "6264"
},
{
"name": "Objective-C",
"bytes": "3570"
},
{
"name": "Python",
"bytes": "6743963"
},
{
"name": "Ruby",
"bytes": "508"
},
{
"name": "Swift",
"bytes": "33266"
}
],
"symlink_target": ""
}
|
from gui_lib.textline import Textline
from productSuggestion import findSuggestion
import gui_lib.core as core
import curses
class ProductLine(Textline):
    """A Textline with an inline product-name completion.

    While the user types, the best matching product name is painted after the
    typed text using a distinct attribute; <enter> accepts the suggestion, and
    the line is colored green once a known product id is attached.
    """
    def __init__(self, width, attribute=curses.A_NORMAL, attribute_suggestion=curses.A_REVERSE):
        super(ProductLine, self).__init__(width, "", attribute)
        self.suggestion = ""
        self.suggestionID = None   # product id of the displayed suggestion
        self.currentID = None      # product id accepted with <enter>
        self.attribute_suggestion = attribute_suggestion

    def draw(self, canvas, offsetx, offsety, minx, miny, maxx, maxy):
        # Entirely outside the vertical clip region: nothing to paint.
        if miny > 0 or maxy < 0:
            return
        super(ProductLine, self).draw(canvas, offsetx, offsety, minx, miny, maxx, maxy)
        if not self.hasFocus:
            return
        # Paint the part of the suggestion that extends beyond the typed text.
        for col in range(minx, maxx):
            src = col + self.textOffset
            if len(self.text) <= src < len(self.suggestion):
                canvas.addch(offsety, offsetx + col,
                    self.suggestion[src], self.attribute_suggestion)

    def keyEvent(self, key):
        handled = super(ProductLine, self).keyEvent(key)
        if 32 <= key < 127:
            # A printable character changed the text: refresh the suggestion.
            self.currentID = None
            (self.suggestionID, self.suggestion) = findSuggestion(self.text)
            self.attribute = curses.A_NORMAL
        if key in (curses.KEY_ENTER, ord('\n')):
            self.currentID = self.suggestionID
            self.text = self.suggestion
        if self.currentID is not None:
            self.attribute = curses.color_pair(core.COLORPAIR_GREEN)
        return handled
|
{
"content_hash": "cca836f7f42b13b8ab663ccaa530373d",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 93,
"avg_line_length": 32.357142857142854,
"alnum_prop": 0.7071376011773363,
"repo_name": "davidv1992/madmin",
"id": "dcc137b48051b66e0118bef8d4632e649c591386",
"size": "1359",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "factuurinput/productLine.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "117851"
}
],
"symlink_target": ""
}
|
#!/usr/bin/env python
from __future__ import print_function
import os, sys, re, string, io
# the list only for debugging. The real list, used in the real OpenCV build, is specified in CMakeLists.txt
opencv_hdr_list = [
"../../core/include/opencv2/core.hpp",
"../../core/include/opencv2/core/ocl.hpp",
"../../flann/include/opencv2/flann/miniflann.hpp",
"../../ml/include/opencv2/ml.hpp",
"../../imgproc/include/opencv2/imgproc.hpp",
"../../calib3d/include/opencv2/calib3d.hpp",
"../../features2d/include/opencv2/features2d.hpp",
"../../video/include/opencv2/video/tracking.hpp",
"../../video/include/opencv2/video/background_segm.hpp",
"../../objdetect/include/opencv2/objdetect.hpp",
"../../imgcodecs/include/opencv2/imgcodecs.hpp",
"../../videoio/include/opencv2/videoio.hpp",
"../../highgui/include/opencv2/highgui.hpp"
]
# Format of the declaration entries produced by CppHeaderParser.parse():
"""
Each declaration is [funcname, return_value_type /* in C, not in Python */, <list_of_modifiers>, <list_of_arguments>],
where each element of <list_of_arguments> is 4-element list itself:
[argtype, argname, default_value /* or "" if none */, <list_of_modifiers>]
where the list of modifiers is yet another nested list of strings
(currently recognized are "/O" for output argument, "/S" for static (i.e. class) methods
and "/A value" for the plain C arrays with counters)
"""
class CppHeaderParser(object):
def __init__(self):
self.BLOCK_TYPE = 0
self.BLOCK_NAME = 1
self.PROCESS_FLAG = 2
self.PUBLIC_SECTION = 3
self.CLASS_DECL = 4
self.namespaces = set()
def batch_replace(self, s, pairs):
for before, after in pairs:
s = s.replace(before, after)
return s
def get_macro_arg(self, arg_str, npos):
npos2 = npos3 = arg_str.find("(", npos)
if npos2 < 0:
print("Error: no arguments for the macro at %d" % (self.lineno,))
sys.exit(-1)
balance = 1
while 1:
t, npos3 = self.find_next_token(arg_str, ['(', ')'], npos3+1)
if npos3 < 0:
print("Error: no matching ')' in the macro call at %d" % (self.lineno,))
sys.exit(-1)
if t == '(':
balance += 1
if t == ')':
balance -= 1
if balance == 0:
break
return arg_str[npos2+1:npos3].strip(), npos3
    def parse_arg(self, arg_str, argno):
        """
        Parses <arg_type> [arg_name]
        Returns arg_type, arg_name, modlist, argno, where
        modlist is the list of wrapper-related modifiers (such as "output argument", "has counter", ...)
        and argno is the new index of an anonymous argument.
        That is, if no arg_str is just an argument type without argument name, the argument name is set to
        "arg" + str(argno), and then argno is incremented.
        """
        modlist = []
        # pass 0: extracts the modifiers
        # CV_OUT / CV_IN_OUT mark output and in-out arguments for the wrappers.
        if "CV_OUT" in arg_str:
            modlist.append("/O")
            arg_str = arg_str.replace("CV_OUT", "")
        if "CV_IN_OUT" in arg_str:
            modlist.append("/IO")
            arg_str = arg_str.replace("CV_IN_OUT", "")
        # CV_CARRAY/CV_CUSTOM_CARRAY declare plain C arrays with a counter
        # expression; the macro argument names the counter.
        isarray = False
        npos = arg_str.find("CV_CARRAY")
        if npos >= 0:
            isarray = True
            macro_arg, npos3 = self.get_macro_arg(arg_str, npos)
            modlist.append("/A " + macro_arg)
            arg_str = arg_str[:npos] + arg_str[npos3+1:]
        npos = arg_str.find("CV_CUSTOM_CARRAY")
        if npos >= 0:
            isarray = True
            macro_arg, npos3 = self.get_macro_arg(arg_str, npos)
            modlist.append("/CA " + macro_arg)
            arg_str = arg_str[:npos] + arg_str[npos3+1:]
        arg_str = arg_str.strip()
        word_start = 0
        word_list = []
        npos = -1
        #print self.lineno, ":\t", arg_str
        # pass 1: split argument type into tokens
        # ("const" is dropped; separators other than space/& are kept as tokens)
        while 1:
            npos += 1
            t, npos = self.find_next_token(arg_str, [" ", "&", "*", "<", ">", ","], npos)
            w = arg_str[word_start:npos].strip()
            if w == "operator":
                word_list.append("operator " + arg_str[npos:].strip())
                break
            if w not in ["", "const"]:
                word_list.append(w)
            if t not in ["", " ", "&"]:
                word_list.append(t)
            if not t:
                break
            word_start = npos+1
            npos = word_start - 1
        arg_type = ""
        arg_name = ""
        angle_stack = []
        #print self.lineno, ":\t", word_list
        # pass 2: decrypt the list
        # Template brackets are flattened into the type name:
        # vector<Mat> -> vector_Mat, pairs joined with "_and_".
        wi = -1
        prev_w = ""
        for w in word_list:
            wi += 1
            if w == "*":
                # "char*" outside of an array context means a C string.
                if prev_w == "char" and not isarray:
                    arg_type = arg_type[:-len("char")] + "c_string"
                else:
                    arg_type += w
                continue
            elif w == "<":
                arg_type += "_"
                angle_stack.append(0)
            elif w == "," or w == '>':
                if not angle_stack:
                    print("Error at %d: argument contains ',' or '>' not within template arguments" % (self.lineno,))
                    sys.exit(-1)
                if w == ",":
                    arg_type += "_and_"
                elif w == ">":
                    if angle_stack[0] == 0:
                        print("Error at %s:%d: template has no arguments" % (self.hname, self.lineno))
                        sys.exit(-1)
                    if angle_stack[0] > 1:
                        arg_type += "_end_"
                    angle_stack[-1:] = []
            elif angle_stack:
                arg_type += w
                angle_stack[-1] += 1
            elif arg_type == "struct":
                arg_type += " " + w
            elif arg_type and arg_type != "~":
                # First word after a complete type is the argument name;
                # everything from here on belongs to the name (e.g. "a[3]").
                arg_name = " ".join(word_list[wi:])
                break
            else:
                arg_type += w
            prev_w = w
        counter_str = ""
        add_star = False
        # "name[N]" declares a fixed-size array: record the counter and turn
        # the type into a pointer.
        if ("[" in arg_name) and not ("operator" in arg_str):
            #print arg_str
            p1 = arg_name.find("[")
            p2 = arg_name.find("]",p1+1)
            if p2 < 0:
                print("Error at %d: no closing ]" % (self.lineno,))
                sys.exit(-1)
            counter_str = arg_name[p1+1:p2].strip()
            if counter_str == "":
                counter_str = "?"
            if not isarray:
                modlist.append("/A " + counter_str.strip())
            arg_name = arg_name[:p1]
            add_star = True
        if not arg_name:
            if arg_type.startswith("operator"):
                arg_type, arg_name = "", arg_type
            else:
                # Anonymous argument: synthesize a name from the running index.
                arg_name = "arg" + str(argno)
                argno += 1
        while arg_type.endswith("_end_"):
            arg_type = arg_type[:-len("_end_")]
        if add_star:
            arg_type += "*"
        arg_type = self.batch_replace(arg_type, [("std::", ""), ("cv::", ""), ("::", "_")])
        return arg_type, arg_name, modlist, argno
def parse_enum(self, decl_str):
l = decl_str
ll = l.split(",")
if ll[-1].strip() == "":
ll = ll[:-1]
prev_val = ""
prev_val_delta = -1
decl = []
for pair in ll:
pv = pair.split("=")
if len(pv) == 1:
prev_val_delta += 1
val = ""
if prev_val:
val = prev_val + "+"
val += str(prev_val_delta)
else:
prev_val_delta = 0
prev_val = val = pv[1].strip()
decl.append(["const " + self.get_dotted_name(pv[0].strip()), val, [], []])
return decl
def parse_class_decl(self, decl_str):
"""
Parses class/struct declaration start in the form:
{class|struct} [CV_EXPORTS] <class_name> [: public <base_class1> [, ...]]
Returns class_name1, <list of base_classes>
"""
l = decl_str
modlist = []
if "CV_EXPORTS_W_MAP" in l:
l = l.replace("CV_EXPORTS_W_MAP", "")
modlist.append("/Map")
if "CV_EXPORTS_W_SIMPLE" in l:
l = l.replace("CV_EXPORTS_W_SIMPLE", "")
modlist.append("/Simple")
npos = l.find("CV_EXPORTS_AS")
if npos >= 0:
macro_arg, npos3 = self.get_macro_arg(l, npos)
modlist.append("=" + macro_arg)
l = l[:npos] + l[npos3+1:]
l = self.batch_replace(l, [("CV_EXPORTS_W", ""), ("CV_EXPORTS", ""), ("public virtual ", " "), ("public ", " "), ("::", ".")]).strip()
ll = re.split(r'\s*[,:]?\s*', l)
ll = [le for le in ll if le]
classname = ll[1]
bases = ll[2:]
return classname, bases, modlist
def parse_func_decl_no_wrap(self, decl_str, static_method = False):
decl_str = (decl_str or "").strip()
virtual_method = False
explicit_method = False
if decl_str.startswith("explicit"):
decl_str = decl_str[len("explicit"):].lstrip()
explicit_method = True
if decl_str.startswith("virtual"):
decl_str = decl_str[len("virtual"):].lstrip()
virtual_method = True
if decl_str.startswith("static"):
decl_str = decl_str[len("static"):].lstrip()
static_method = True
fdecl = decl_str.replace("CV_OUT", "").replace("CV_IN_OUT", "")
fdecl = fdecl.strip().replace("\t", " ")
while " " in fdecl:
fdecl = fdecl.replace(" ", " ")
fname = fdecl[:fdecl.find("(")].strip()
fnpos = fname.rfind(" ")
if fnpos < 0:
fnpos = 0
fname = fname[fnpos:].strip()
rettype = fdecl[:fnpos].strip()
if rettype.endswith("operator"):
fname = ("operator " + fname).strip()
rettype = rettype[:rettype.rfind("operator")].strip()
if rettype.endswith("::"):
rpos = rettype.rfind(" ")
if rpos >= 0:
fname = rettype[rpos+1:].strip() + fname
rettype = rettype[:rpos].strip()
else:
fname = rettype + fname
rettype = ""
apos = fdecl.find("(")
if fname.endswith("operator"):
fname += " ()"
apos = fdecl.find("(", apos+1)
fname = "cv." + fname.replace("::", ".")
decl = [fname, rettype, [], []]
# inline constructor implementation
implmatch = re.match(r"(\(.*?\))\s*:\s*(\w+\(.*?\),?\s*)+", fdecl[apos:])
if bool(implmatch):
fdecl = fdecl[:apos] + implmatch.group(1)
args0str = fdecl[apos+1:fdecl.rfind(")")].strip()
if args0str != "" and args0str != "void":
args0str = re.sub(r"\([^)]*\)", lambda m: m.group(0).replace(',', "@comma@"), args0str)
args0 = args0str.split(",")
args = []
narg = ""
for arg in args0:
narg += arg.strip()
balance_paren = narg.count("(") - narg.count(")")
balance_angle = narg.count("<") - narg.count(">")
if balance_paren == 0 and balance_angle == 0:
args.append(narg.strip())
narg = ""
for arg in args:
dfpos = arg.find("=")
defval = ""
if dfpos >= 0:
defval = arg[dfpos+1:].strip()
else:
dfpos = arg.find("CV_DEFAULT")
if dfpos >= 0:
defval, pos3 = self.get_macro_arg(arg, dfpos)
else:
dfpos = arg.find("CV_WRAP_DEFAULT")
if dfpos >= 0:
defval, pos3 = self.get_macro_arg(arg, dfpos)
if dfpos >= 0:
defval = defval.replace("@comma@", ",")
arg = arg[:dfpos].strip()
pos = len(arg)-1
while pos >= 0 and (arg[pos] in "_[]" or arg[pos].isalpha() or arg[pos].isdigit()):
pos -= 1
if pos >= 0:
aname = arg[pos+1:].strip()
atype = arg[:pos+1].strip()
if aname.endswith("&") or aname.endswith("*") or (aname in ["int", "String", "Mat"]):
atype = (atype + " " + aname).strip()
aname = ""
else:
atype = arg
aname = ""
if aname.endswith("]"):
bidx = aname.find('[')
atype += aname[bidx:]
aname = aname[:bidx]
decl[3].append([atype, aname, defval, []])
if static_method:
decl[2].append("/S")
if virtual_method:
decl[2].append("/V")
if explicit_method:
decl[2].append("/E")
if bool(re.match(r".*\)\s*(const)?\s*=\s*0", decl_str)):
decl[2].append("/A")
if bool(re.match(r".*\)\s*const(\s*=\s*0)?", decl_str)):
decl[2].append("/C")
if "virtual" in decl_str:
print(decl_str)
return decl
    def parse_func_decl(self, decl_str):
        """
        Parses the function or method declaration in the form:
        [([CV_EXPORTS] <rettype>) | CVAPI(rettype)]
            [~]<function_name>
            (<arg_type1> <arg_name1>[=<default_value1>] [, <arg_type2> <arg_name2>[=<default_value2>] ...])
            [const] {; | <function_body>}
        Returns the function declaration entry:
        [<func name>, <return value C-type>, <list of modifiers>, <list of arguments>] (see above)
        """
        # In wrap mode only the explicitly exported/wrapped API is processed.
        if self.wrap_mode:
            if not (("CV_EXPORTS_AS" in decl_str) or ("CV_EXPORTS_W" in decl_str) or \
                ("CV_WRAP" in decl_str) or ("CV_WRAP_AS" in decl_str)):
                return []
        # ignore old API in the documentation check (for now)
        if "CVAPI(" in decl_str and self.wrap_mode:
            return []
        top = self.block_stack[-1]
        func_modlist = []
        # CV_EXPORTS_AS/CV_WRAP_AS rename the function in the wrappers ("=newname").
        npos = decl_str.find("CV_EXPORTS_AS")
        if npos >= 0:
            arg, npos3 = self.get_macro_arg(decl_str, npos)
            func_modlist.append("="+arg)
            decl_str = decl_str[:npos] + decl_str[npos3+1:]
        npos = decl_str.find("CV_WRAP_AS")
        if npos >= 0:
            arg, npos3 = self.get_macro_arg(decl_str, npos)
            func_modlist.append("="+arg)
            decl_str = decl_str[:npos] + decl_str[npos3+1:]
        # filter off some common prefixes, which are meaningless for Python wrappers.
        # note that we do not strip "static" prefix, which does matter;
        # it means class methods, not instance methods
        decl_str = self.batch_replace(decl_str, [("virtual", ""), ("static inline", ""), ("inline", ""),\
            ("CV_EXPORTS_W", ""), ("CV_EXPORTS", ""), ("CV_CDECL", ""), ("CV_WRAP ", " "), ("CV_INLINE", "")]).strip()
        static_method = False
        context = top[0]
        if decl_str.startswith("static") and (context == "class" or context == "struct"):
            decl_str = decl_str[len("static"):].lstrip()
            static_method = True
        args_begin = decl_str.find("(")
        # CVAPI(rettype) wraps the return type: unwrap it first.
        if decl_str.startswith("CVAPI"):
            rtype_end = decl_str.find(")", args_begin+1)
            if rtype_end < 0:
                print("Error at %d. no terminating ) in CVAPI() macro: %s" % (self.lineno, decl_str))
                sys.exit(-1)
            decl_str = decl_str[args_begin+1:rtype_end] + " " + decl_str[rtype_end+1:]
            args_begin = decl_str.find("(")
        if args_begin < 0:
            print("Error at %d: no args in '%s'" % (self.lineno, decl_str))
            sys.exit(-1)
        decl_start = decl_str[:args_begin].strip()
        # handle operator () case
        if decl_start.endswith("operator"):
            args_begin = decl_str.find("(", args_begin+1)
            if args_begin < 0:
                print("Error at %d: no args in '%s'" % (self.lineno, decl_str))
                sys.exit(-1)
            decl_start = decl_str[:args_begin].strip()
            # TODO: normalize all type of operators
            if decl_start.endswith("()"):
                decl_start = decl_start[0:-2].rstrip() + " ()"
        # constructor/destructor case
        if bool(re.match(r'^(\w+::)*(?P<x>\w+)::~?(?P=x)$', decl_start)):
            decl_start = "void " + decl_start
        rettype, funcname, modlist, argno = self.parse_arg(decl_start, -1)
        # argno >= 0 means parse_arg found no explicit name -> could be a
        # constructor/destructor or a non-function construct to be skipped.
        if argno >= 0:
            classname = top[1]
            if rettype == classname or rettype == "~" + classname:
                rettype, funcname = "", rettype
            else:
                if bool(re.match('\w+\s+\(\*\w+\)\s*\(.*\)', decl_str)):
                    return [] # function typedef
                elif bool(re.match('\w+\s+\(\w+::\*\w+\)\s*\(.*\)', decl_str)):
                    return [] # class method typedef
                elif bool(re.match('[A-Z_]+', decl_start)):
                    return [] # it seems to be a macro instantiation
                elif "__declspec" == decl_start:
                    return []
                elif bool(re.match(r'\w+\s+\(\*\w+\)\[\d+\]', decl_str)):
                    return [] # exotic - dynamic 2d array
                else:
                    #print rettype, funcname, modlist, argno
                    print("Error at %s:%d the function/method name is missing: '%s'" % (self.hname, self.lineno, decl_start))
                    sys.exit(-1)
        if self.wrap_mode and (("::" in funcname) or funcname.startswith("~")):
            # if there is :: in function name (and this is in the header file),
            # it means, this is inline implementation of a class method.
            # Thus the function has been already declared within the class and we skip this repeated
            # declaration.
            # Also, skip the destructors, as they are always wrapped
            return []
        funcname = self.get_dotted_name(funcname)
        if not self.wrap_mode:
            decl = self.parse_func_decl_no_wrap(decl_str, static_method)
            decl[0] = funcname
            return decl
        arg_start = args_begin+1
        npos = arg_start-1
        balance = 1
        angle_balance = 0
        # scan the argument list; handle nested parentheses
        args_decls = []
        args = []
        argno = 1
        while balance > 0:
            npos += 1
            t, npos = self.find_next_token(decl_str, ["(", ")", ",", "<", ">"], npos)
            if not t:
                print("Error: no closing ')' at %d" % (self.lineno,))
                print(decl_str)
                print(decl_str[arg_start:])
                sys.exit(-1)
            if t == "<":
                angle_balance += 1
            if t == ">":
                angle_balance -= 1
            if t == "(":
                balance += 1
            if t == ")":
                balance -= 1
            # A top-level comma (or the final ')') terminates one argument.
            if (t == "," and balance == 1 and angle_balance == 0) or balance == 0:
                # process next function argument
                a = decl_str[arg_start:npos].strip()
                #print "arg = ", a
                arg_start = npos+1
                if a:
                    eqpos = a.find("=")
                    defval = ""
                    modlist = []
                    if eqpos >= 0:
                        defval = a[eqpos+1:].strip()
                    else:
                        eqpos = a.find("CV_DEFAULT")
                        if eqpos >= 0:
                            defval, pos3 = self.get_macro_arg(a, eqpos)
                        else:
                            eqpos = a.find("CV_WRAP_DEFAULT")
                            if eqpos >= 0:
                                defval, pos3 = self.get_macro_arg(a, eqpos)
                    if defval == "NULL":
                        defval = "0"
                    if eqpos >= 0:
                        a = a[:eqpos].strip()
                    arg_type, arg_name, modlist, argno = self.parse_arg(a, argno)
                    if self.wrap_mode:
                        # Map the proxy array types onto Mat/vector_Mat and
                        # record the in/out direction as a modifier.
                        if arg_type == "InputArray":
                            arg_type = "Mat"
                        elif arg_type == "InputOutputArray":
                            arg_type = "Mat"
                            modlist.append("/IO")
                        elif arg_type == "OutputArray":
                            arg_type = "Mat"
                            modlist.append("/O")
                        elif arg_type == "InputArrayOfArrays":
                            arg_type = "vector_Mat"
                        elif arg_type == "InputOutputArrayOfArrays":
                            arg_type = "vector_Mat"
                            modlist.append("/IO")
                        elif arg_type == "OutputArrayOfArrays":
                            arg_type = "vector_Mat"
                            modlist.append("/O")
                        defval = self.batch_replace(defval, [("InputArrayOfArrays", "vector<Mat>"),
                                                             ("InputOutputArrayOfArrays", "vector<Mat>"),
                                                             ("OutputArrayOfArrays", "vector<Mat>"),
                                                             ("InputArray", "Mat"),
                                                             ("InputOutputArray", "Mat"),
                                                             ("OutputArray", "Mat"),
                                                             ("noArray", arg_type)]).strip()
                    args.append([arg_type, arg_name, defval, modlist])
                    npos = arg_start-1
        if static_method:
            func_modlist.append("/S")
        return [funcname, rettype, func_modlist, args]
def get_dotted_name(self, name):
"""
adds the dot-separated container class/namespace names to the bare function/class name, e.g. when we have
namespace cv {
class A {
public:
f(int);
};
}
the function will convert "A" to "cv.A" and "f" to "cv.A.f".
"""
if not self.block_stack:
return name
if name.startswith("cv."):
return name
qualified_name = (("." in name) or ("::" in name))
n = ""
for b in self.block_stack:
block_type, block_name = b[self.BLOCK_TYPE], b[self.BLOCK_NAME]
if block_type in ["file", "enum"]:
continue
if block_type not in ["struct", "class", "namespace"]:
print("Error at %d: there are non-valid entries in the current block stack " % (self.lineno, self.block_stack))
sys.exit(-1)
if block_name and (block_type == "namespace" or not qualified_name):
n += block_name + "."
n += name.replace("::", ".")
if n.endswith(".Algorithm"):
n = "cv.Algorithm"
return n
    def parse_stmt(self, stmt, end_token):
        """
        parses the statement (ending with ';' or '}') or a block head (ending with '{')
        The function calls parse_class_decl or parse_func_decl when necessary. It returns
        <block_type>, <block_name>, <parse_flag>, <declaration>
        where the first 3 values only make sense for blocks (i.e. code blocks, namespaces, classes, enums and such)
        """
        stack_top = self.block_stack[-1]
        context = stack_top[self.BLOCK_TYPE]
        stmt_type = ""
        if end_token == "{":
            stmt_type = "block"
        if context == "block":
            print("Error at %d: should not call parse_stmt inside blocks" % (self.lineno,))
            sys.exit(-1)
        if context == "class" or context == "struct":
            # A leading "public:"/"protected:"/"private:" label toggles the
            # visibility flag of the enclosing class block.
            while 1:
                colon_pos = stmt.find(":")
                if colon_pos < 0:
                    break
                w = stmt[:colon_pos].strip()
                if w in ["public", "protected", "private"]:
                    if w == "public" or (not self.wrap_mode and w == "protected"):
                        stack_top[self.PUBLIC_SECTION] = True
                    else:
                        stack_top[self.PUBLIC_SECTION] = False
                    stmt = stmt[colon_pos+1:].strip()
                break
        # do not process hidden class members and template classes/functions
        if not stack_top[self.PUBLIC_SECTION] or stmt.startswith("template"):
            return stmt_type, "", False, None
        if end_token == "{":
            # "typedef struct X {" — the legacy C-style struct declaration.
            if not self.wrap_mode and stmt.startswith("typedef struct"):
                stmt_type = "struct"
                try:
                    classname, bases, modlist = self.parse_class_decl(stmt[len("typedef "):])
                except:
                    print("Error at %s:%d" % (self.hname, self.lineno))
                    exit(1)
                if classname.startswith("_Ipl"):
                    classname = classname[1:]
                decl = [stmt_type + " " + self.get_dotted_name(classname), "", modlist, []]
                if bases:
                    decl[1] = ": " + ", ".join([self.get_dotted_name(b).replace(".","::") for b in bases])
                return stmt_type, classname, True, decl
            if stmt.startswith("class") or stmt.startswith("struct"):
                stmt_type = stmt.split()[0]
                if stmt.strip() != stmt_type:
                    try:
                        classname, bases, modlist = self.parse_class_decl(stmt)
                    except:
                        print("Error at %s:%d" % (self.hname, self.lineno))
                        exit(1)
                    decl = []
                    if ("CV_EXPORTS_W" in stmt) or ("CV_EXPORTS_AS" in stmt) or (not self.wrap_mode):# and ("CV_EXPORTS" in stmt)):
                        decl = [stmt_type + " " + self.get_dotted_name(classname), "", modlist, []]
                        if bases:
                            decl[1] = ": " + ", ".join([self.get_dotted_name(b).replace(".","::") for b in bases])
                    return stmt_type, classname, True, decl
            if stmt.startswith("enum"):
                return "enum", "", True, None
            if stmt.startswith("namespace"):
                stmt_list = stmt.split()
                if len(stmt_list) < 2:
                    stmt_list.append("<unnamed>")
                return stmt_list[0], stmt_list[1], True, None
            if stmt.startswith("extern") and "\"C\"" in stmt:
                # 'extern "C" {' is treated like an anonymous namespace.
                return "namespace", "", True, None
        if end_token == "}" and context == "enum":
            decl = self.parse_enum(stmt)
            return "enum", "", False, decl
        if end_token == ";" and stmt.startswith("typedef"):
            # TODO: handle typedef's more intelligently
            return stmt_type, "", False, None
        paren_pos = stmt.find("(")
        if paren_pos >= 0:
            # assume it's function or method declaration,
            # since we filtered off the other places where '(' can normally occur:
            # - code blocks
            # - function pointer typedef's
            decl = self.parse_func_decl(stmt)
            # we return parse_flag == False to prevent the parser to look inside function/method bodies
            # (except for tracking the nested blocks)
            return stmt_type, "", False, decl
        if (context == "struct" or context == "class") and end_token == ";" and stmt:
            # looks like it's member declaration; append the members to the class declaration
            class_decl = stack_top[self.CLASS_DECL]
            if ("CV_PROP" in stmt): # or (class_decl and ("/Map" in class_decl[2])):
                var_modlist = []
                if "CV_PROP_RW" in stmt:
                    var_modlist.append("/RW")
                stmt = self.batch_replace(stmt, [("CV_PROP_RW", ""), ("CV_PROP", "")]).strip()
                var_list = stmt.split(",")
                var_type, var_name1, modlist, argno = self.parse_arg(var_list[0], -1)
                var_list = [var_name1] + [i.strip() for i in var_list[1:]]
                for v in var_list:
                    class_decl[3].append([var_type, v, "", var_modlist])
            return stmt_type, "", False, None
        # something unknown
        return stmt_type, "", False, None
def find_next_token(self, s, tlist, p=0):
    """Return the earliest occurrence of any token from *tlist* in *s*,
    searching from position *p*.

    Returns a ``(token, position)`` pair.  When none of the tokens occurs,
    returns ``("", len(s))``.  If two tokens start at the same index, the
    one listed first in *tlist* wins (strict ``<`` comparison below).
    """
    best_token = ""
    best_pos = len(s)
    for candidate in tlist:
        idx = s.find(candidate, p)
        if 0 <= idx < best_pos:
            best_pos = idx
            best_token = candidate
    return best_token, best_pos
def parse(self, hname, wmode=True):
    """
    The main method. Parses the input header file `hname`.

    Runs a small line-oriented state machine (SCAN / COMMENT / DIRECTIVE)
    that strips preprocessor directives, comments and string literals,
    accumulates statement text in `block_head`, and hands each complete
    statement (terminated by ';', '{' or '}') to parse_stmt().  Block
    nesting is tracked in self.block_stack.

    Returns the list of declarations (that can be printed using print_decls).
    """
    self.hname = hname
    decls = []
    # Read the whole header up front; the context manager guarantees the
    # file handle is closed even if parsing below raises.
    with io.open(hname, 'rt', encoding='utf-8') as f:
        linelist = list(f.readlines())

    # states:
    SCAN = 0       # outside of a comment or preprocessor directive
    COMMENT = 1    # inside a multi-line comment
    DIRECTIVE = 2  # inside a multi-line preprocessor directive

    state = SCAN

    self.block_stack = [["file", hname, True, True, None]]
    block_head = ""
    self.lineno = 0
    self.wrap_mode = wmode

    for l0 in linelist:
        self.lineno += 1
        l = l0.strip()

        if state == SCAN and l.startswith("#"):
            state = DIRECTIVE
            # fall through to the "if state == DIRECTIVE" check

        if state == DIRECTIVE:
            # A trailing backslash continues the directive onto the next line.
            if not l.endswith("\\"):
                state = SCAN
            continue

        if state == COMMENT:
            pos = l.find("*/")
            if pos < 0:
                continue
            l = l[pos+2:]
            state = SCAN

        if state != SCAN:
            print("Error at %d: invalid state = %d" % (self.lineno, state))
            sys.exit(-1)

        while 1:
            # Find the next significant token on (the remainder of) the line.
            token, pos = self.find_next_token(l, [";", "\"", "{", "}", "//", "/*"])

            if not token:
                # No token: the whole line belongs to the current statement.
                block_head += " " + l
                break

            if token == "//":
                # Line comment: keep the text before it, drop the rest.
                block_head += " " + l[:pos]
                break

            if token == "/*":
                block_head += " " + l[:pos]
                pos = l.find("*/", pos+2)
                if pos < 0:
                    # Comment continues on the following lines.
                    state = COMMENT
                    break
                l = l[pos+2:]
                continue

            if token == "\"":
                # Skip over a string literal, honouring backslash escapes.
                pos2 = pos + 1
                while 1:
                    t2, pos2 = self.find_next_token(l, ["\\", "\""], pos2)
                    if t2 == "":
                        print("Error at %d: no terminating '\"'" % (self.lineno,))
                        sys.exit(-1)
                    if t2 == "\"":
                        break
                    pos2 += 2

                block_head += " " + l[:pos2+1]
                l = l[pos2+1:]
                continue

            # token is one of ";", "{", "}": the statement text is complete.
            stmt = (block_head + " " + l[:pos]).strip()
            stmt = " ".join(stmt.split())  # normalize the statement
            stack_top = self.block_stack[-1]

            if stmt.startswith("@"):
                # Objective C ?
                break

            decl = None
            if stack_top[self.PROCESS_FLAG]:
                # even if stack_top[PUBLIC_SECTION] is False, we still try to process the statement,
                # since it can start with "public:"
                stmt_type, name, parse_flag, decl = self.parse_stmt(stmt, token)
                if decl:
                    # An enum yields a list of declarations; everything else one.
                    if stmt_type == "enum":
                        decls.extend(decl)
                    else:
                        decls.append(decl)

                if stmt_type == "namespace":
                    # Record the fully qualified (dotted) namespace name.
                    chunks = [block[1] for block in self.block_stack if block[0] == 'namespace'] + [name]
                    self.namespaces.add('.'.join(chunks))
            else:
                stmt_type, name, parse_flag = "block", "", False

            if token == "{":
                # Class members are private until an explicit "public:".
                if stmt_type == "class":
                    public_section = False
                else:
                    public_section = True
                self.block_stack.append([stmt_type, name, parse_flag, public_section, decl])

            if token == "}":
                if not self.block_stack:
                    print("Error at %d: the block stack is empty" % (self.lineno,))
                self.block_stack[-1:] = []
                # Swallow a ';' that immediately follows the closing brace.
                if pos+1 < len(l) and l[pos+1] == ';':
                    pos += 1

            block_head = ""
            l = l[pos+1:]

    return decls
def print_decls(self, decls):
    """Dump the declaration list produced by parse() to stdout in a
    compact human-readable form: one line per declaration, followed by
    one indented line per argument/member entry."""
    for decl in decls:
        print(decl[0], decl[1], ";".join(decl[2]))
        for arg in decl[3]:
            print(" ", arg[0], arg[1], arg[2], end="")
            # The trailing modifier list (if any) shares the same line.
            print("; ".join(arg[3]) if arg[3] else "")
if __name__ == '__main__':
    # Ad-hoc driver: parse every header listed in opencv_hdr_list (defined
    # earlier in this file) and dump the collected declarations, a total
    # count, and all discovered namespaces.
    parser = CppHeaderParser()
    decls = []
    for hname in opencv_hdr_list:
        decls += parser.parse(hname)
    #for hname in sys.argv[1:]:
        #decls += parser.parse(hname, wmode=False)
    parser.print_decls(decls)
    print(len(decls))
    print("namespaces:", " ".join(sorted(parser.namespaces)))
|
{
"content_hash": "315dfb7f3d28d296036e403b0e05e623",
"timestamp": "",
"source": "github",
"line_count": 880,
"max_line_length": 142,
"avg_line_length": 39.16136363636364,
"alnum_prop": 0.4521792118855551,
"repo_name": "apavlenko/opencv",
"id": "ea32a17e76bebb27d6aeb916747f29f191676885",
"size": "34462",
"binary": false,
"copies": "1",
"ref": "refs/heads/copyright_fixes",
"path": "modules/python/src2/hdr_parser.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Assembly",
"bytes": "7270"
},
{
"name": "C",
"bytes": "11698890"
},
{
"name": "C++",
"bytes": "24280164"
},
{
"name": "Java",
"bytes": "688479"
},
{
"name": "JavaScript",
"bytes": "352"
},
{
"name": "Objective-C",
"bytes": "323668"
},
{
"name": "Python",
"bytes": "719384"
},
{
"name": "Shell",
"bytes": "2521"
},
{
"name": "TeX",
"bytes": "48853"
}
],
"symlink_target": ""
}
|
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_Dialog(object):
    """UI definition for a minimal plain-text editor dialog: a QTextEdit
    stacked above a centered OK/Cancel button box.

    NOTE(review): this looks like pyuic5-generated code (setupUi /
    retranslateUi / connectSlotsByName pattern) -- if so, prefer editing
    the .ui source and regenerating rather than changing this class by hand.
    """

    def setupUi(self, Dialog):
        # Build the widget tree, lay it out, and wire accept/reject.
        Dialog.setObjectName("Dialog")
        Dialog.resize(247, 195)
        self.verticalLayout = QtWidgets.QVBoxLayout(Dialog)
        self.verticalLayout.setObjectName("verticalLayout")
        # Editable text area occupies the dialog above the buttons.
        self.edit_text = QtWidgets.QTextEdit(Dialog)
        self.edit_text.setObjectName("edit_text")
        self.verticalLayout.addWidget(self.edit_text)
        self.buttonBox = QtWidgets.QDialogButtonBox(Dialog)
        self.buttonBox.setOrientation(QtCore.Qt.Horizontal)
        self.buttonBox.setStandardButtons(QtWidgets.QDialogButtonBox.Cancel|QtWidgets.QDialogButtonBox.Ok)
        self.buttonBox.setCenterButtons(True)
        self.buttonBox.setObjectName("buttonBox")
        self.verticalLayout.addWidget(self.buttonBox)

        self.retranslateUi(Dialog)
        # Standard button-box wiring: Ok -> accept(), Cancel -> reject().
        self.buttonBox.accepted.connect(Dialog.accept)
        self.buttonBox.rejected.connect(Dialog.reject)
        QtCore.QMetaObject.connectSlotsByName(Dialog)

    def retranslateUi(self, Dialog):
        # Apply (re)translatable texts; called once from setupUi.
        _translate = QtCore.QCoreApplication.translate
        Dialog.setWindowTitle(_translate("Dialog", "Text Editor"))
|
{
"content_hash": "175359c13d5bef90fefad14c8be39576",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 106,
"avg_line_length": 43.925925925925924,
"alnum_prop": 0.7209106239460371,
"repo_name": "GandaG/fomod-editor",
"id": "48689d918c528f6ed2390af594fe63d543ad0428",
"size": "1416",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "src/ui_templates/window_plaintexteditor.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "40123"
},
{
"name": "Shell",
"bytes": "2803"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from captcha.fields import CaptchaField
from django import forms
from django.contrib.auth.models import User
from django.core.exceptions import ValidationError
from django.forms import PasswordInput
from djanban.apps.password_reseter.models import PasswordResetRequest
# Request new password form
class RequestPasswordResetForm(forms.Form):
    """Form asking for a username (plus a captcha) to start a password reset.

    Validation succeeds only when an active user with that username exists
    and has no password-reset request still pending; the resolved user is
    stored in ``cleaned_data["user"]`` for the view to use.
    """
    username = forms.CharField(label=u"Username")
    captcha = CaptchaField(label=u"Fill this captcha to reset your password")

    def clean(self):
        cleaned_data = super(RequestPasswordResetForm, self).clean()

        # If the username field itself failed validation it is absent from
        # cleaned_data; bail out instead of raising KeyError.
        username = cleaned_data.get("username")
        if not username:
            return cleaned_data

        # Check if user exists.  The query already restricts to active
        # accounts, so no separate is_active re-check is needed afterwards.
        try:
            user = User.objects.get(username=username, is_active=True)
        except User.DoesNotExist:
            # NOTE(review): this message reveals whether a username exists
            # (user enumeration); consider a generic message if that matters.
            raise ValidationError("No user found with this username")

        # Check if there is any other pending password reset request
        if PasswordResetRequest.user_has_a_pending_new_password_request(user):
            raise ValidationError(u"You already have a pending password request.")

        cleaned_data["user"] = user
        return cleaned_data
# Reset password form
class ResetPasswordForm(forms.Form):
    """Form that asks for the new password twice (plus a captcha) and
    validates that both entries match."""
    password1 = forms.CharField(label=u"Password", widget=PasswordInput())
    password2 = forms.CharField(label=u"Introduce again your password", widget=PasswordInput())
    captcha = CaptchaField(label=u"Fill this captcha to reset your password")

    def clean(self):
        data = super(ResetPasswordForm, self).clean()
        first = data.get("password1")
        second = data.get("password2")
        # Both entries must agree before the reset is accepted.
        if first != second:
            raise ValidationError("Passwords don't match")
        return data
|
{
"content_hash": "d83c93722cba637208388c07d00e04f0",
"timestamp": "",
"source": "github",
"line_count": 47,
"max_line_length": 95,
"avg_line_length": 39.40425531914894,
"alnum_prop": 0.7143628509719222,
"repo_name": "diegojromerolopez/djanban",
"id": "b0a383098efe753bce9053c892e4f4e36fd0874d",
"size": "1853",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/djanban/apps/password_reseter/forms.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "79709"
},
{
"name": "HTML",
"bytes": "660275"
},
{
"name": "JavaScript",
"bytes": "634320"
},
{
"name": "Python",
"bytes": "993818"
},
{
"name": "Shell",
"bytes": "1732"
},
{
"name": "TypeScript",
"bytes": "71578"
}
],
"symlink_target": ""
}
|
import unittest
import logging
import subprocess
from tempfile import mkdtemp
import time
import socket
import pymongo
from tests.common import load_check
from nose.plugins.skip import SkipTest
# Ports for the two mongod instances spawned by the test fixture.
PORT1 = 37017
PORT2 = 37018
# Upper bound on polling iterations (0.1 s each) in wait4mongo, i.e. ~15 s.
MAX_WAIT = 150
class TestMongo(unittest.TestCase):
    """Integration tests for the 'mongo' agent check.

    setUp() launches two local mongod processes joined into a replica set
    ("testset") and the checks are then run against both members.  Both
    test methods currently raise SkipTest up front because they require a
    mongod binary on the host.
    """

    def wait4mongo(self, process, port):
        """Poll mongod's stdout until it reports it accepts connections,
        giving up after MAX_WAIT polls of 0.1 s.

        `port` is unused here; readiness is detected from the log line.
        """
        # Somehow process.communicate() hangs
        out = process.stdout
        loop = 0
        while True:
            l = out.readline()
            if l.find("[initandlisten] waiting for connections on port") > -1:
                break
            else:
                time.sleep(0.1)
                loop += 1
                if loop >= MAX_WAIT:
                    break

    def setUp(self):
        """Start two mongod instances in temp dirs and tie them into a
        replica set; failures are logged rather than raised."""
        self.agent_config = {
            'version': '0.1',
            'api_key': 'toto'
        }

        # Initialize the check from checks_d
        self.check = load_check('mongo', {'init_config': {}, 'instances': {}}, self.agent_config)

        # Start 2 instances of Mongo in a replica set
        dir1 = mkdtemp()
        dir2 = mkdtemp()
        try:
            self.p1 = subprocess.Popen(["mongod",
                                        "--dbpath",
                                        dir1,
                                        "--port",
                                        str(PORT1),
                                        "--replSet",
                                        "testset/%s:%d" % (socket.gethostname(),
                                                           PORT2),
                                        "--rest"],
                                       executable="mongod",
                                       stdout=subprocess.PIPE,
                                       stderr=subprocess.PIPE)
            # Sleep until mongo comes online
            self.wait4mongo(self.p1, PORT1)
            if self.p1:
                # Set up replication
                c1 = pymongo.Connection('localhost:%s' % PORT1, slave_okay=True)
                self.p2 = subprocess.Popen(["mongod",
                                            "--dbpath",
                                            dir2,
                                            "--port",
                                            str(PORT2),
                                            "--replSet",
                                            "testset/%s:%d" % (socket.gethostname(),
                                                               PORT1),
                                            "--rest"],
                                           executable="mongod",
                                           stdout=subprocess.PIPE,
                                           stderr=subprocess.PIPE)
                self.wait4mongo(self.p2, PORT2)
                # Waiting before all members are online
                time.sleep(15)
                c1.admin.command("replSetInitiate")
                # Sleep for 15s until replication is stable
                time.sleep(30)
                assert pymongo.Connection('localhost:%s' % PORT2)
        except Exception:
            logging.getLogger().exception("Cannot instantiate mongod properly")

    def tearDown(self):
        """Terminate whichever mongod processes setUp managed to start."""
        try:
            if "p1" in dir(self):
                self.p1.terminate()
            if "p2" in dir(self):
                self.p2.terminate()
        except Exception:
            logging.getLogger().exception("Cannot terminate mongod instances")

    def testMongoCheck(self):
        """Run the check twice against each replica member (new-style
        per-instance config) and sanity-check a few metric values.

        Everything below the SkipTest is intentionally unreachable until a
        MongoDB binary is available in CI.
        """
        raise SkipTest('Requires MongoDB')

        self.config = {
            'instances': [{
                'server': "mongodb://localhost:%s/test" % PORT1
            },
            {
                'server': "mongodb://localhost:%s/test" % PORT2
            }]
        }

        # Test mongodb with checks_d
        self.check = load_check('mongo', self.config, self.agent_config)

        # Run the check against our running server
        self.check.check(self.config['instances'][0])
        # Sleep for 1 second so the rate interval >=1
        time.sleep(1)
        # Run the check again so we get the rates
        self.check.check(self.config['instances'][0])

        # Metric assertions
        metrics = self.check.get_metrics()
        assert metrics
        self.assertIsInstance(metrics, list)
        self.assertTrue(len(metrics) > 0)

        # Loose value checks; each metric row is (name, ts, value, ...).
        metric_val_checks = {
            'mongodb.connections.current': lambda x: x >= 1,
            'mongodb.connections.available': lambda x: x >= 1,
            'mongodb.uptime': lambda x: x >= 0,
            'mongodb.mem.resident': lambda x: x > 0,
            'mongodb.mem.virtual': lambda x: x > 0
        }

        for m in metrics:
            metric_name = m[0]
            if metric_name in metric_val_checks:
                self.assertTrue(metric_val_checks[metric_name](m[2]))

        # Run the check against our running server
        self.check.check(self.config['instances'][1])
        # Sleep for 1 second so the rate interval >=1
        time.sleep(1)
        # Run the check again so we get the rates
        self.check.check(self.config['instances'][1])

        # Metric assertions
        metrics = self.check.get_metrics()
        assert metrics
        self.assertIsInstance(metrics, list)
        self.assertTrue(len(metrics) > 0)

        for m in metrics:
            metric_name = m[0]
            if metric_name in metric_val_checks:
                self.assertTrue(metric_val_checks[metric_name](m[2]))

    def testMongoOldConfig(self):
        """Same as testMongoCheck but driving the check through the legacy
        agent-level config translated by parse_agent_config.

        Also unreachable until MongoDB is available (SkipTest).
        """
        raise SkipTest('Requires MongoDB')

        self.agent_config1 = {
            'mongodb_server': "mongodb://localhost:%s/test" % PORT1,
            'version': '0.1',
            'api_key': 'toto'
        }

        conf1 = self.check.parse_agent_config(self.agent_config1)

        self.agent_config2 = {
            'mongodb_server': "mongodb://localhost:%s/test" % PORT2,
            'version': '0.1',
            'api_key': 'toto'
        }

        conf2 = self.check.parse_agent_config(self.agent_config2)

        # Test the first mongodb instance
        self.check = load_check('mongo', conf1, self.agent_config1)

        # Run the check against our running server
        self.check.check(conf1['instances'][0])
        # Sleep for 1 second so the rate interval >=1
        time.sleep(1)
        # Run the check again so we get the rates
        self.check.check(conf1['instances'][0])

        # Metric assertions
        metrics = self.check.get_metrics()
        assert metrics
        self.assertIsInstance(metrics, list)
        self.assertTrue(len(metrics) > 0)

        metric_val_checks = {
            'mongodb.connections.current': lambda x: x >= 1,
            'mongodb.connections.available': lambda x: x >= 1,
            'mongodb.uptime': lambda x: x >= 0,
            'mongodb.mem.resident': lambda x: x > 0,
            'mongodb.mem.virtual': lambda x: x > 0
        }

        for m in metrics:
            metric_name = m[0]
            if metric_name in metric_val_checks:
                self.assertTrue(metric_val_checks[metric_name](m[2]))

        # Test the second mongodb instance
        self.check = load_check('mongo', conf2, self.agent_config2)

        # Run the check against our running server
        self.check.check(conf2['instances'][0])
        # Sleep for 1 second so the rate interval >=1
        time.sleep(1)
        # Run the check again so we get the rates
        self.check.check(conf2['instances'][0])

        # Metric assertions
        metrics = self.check.get_metrics()
        assert metrics
        self.assertIsInstance(metrics, list)
        self.assertTrue(len(metrics) > 0)

        for m in metrics:
            metric_name = m[0]
            if metric_name in metric_val_checks:
                self.assertTrue(metric_val_checks[metric_name](m[2]))
if __name__ == '__main__':
    # Allow running this module directly, outside the test runner.
    unittest.main()
|
{
"content_hash": "6b7ee0dd90959aa24496406dfdfde8e0",
"timestamp": "",
"source": "github",
"line_count": 219,
"max_line_length": 97,
"avg_line_length": 36.114155251141554,
"alnum_prop": 0.49424706031103804,
"repo_name": "sapcc/monasca-agent",
"id": "c3a42e13d912c0a7af448a09c25a96bf882bbd84",
"size": "7909",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests_to_fix/test_mongo.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C#",
"bytes": "1855"
},
{
"name": "Makefile",
"bytes": "3221"
},
{
"name": "Nginx",
"bytes": "1211"
},
{
"name": "PowerShell",
"bytes": "2396"
},
{
"name": "Python",
"bytes": "1280190"
},
{
"name": "Roff",
"bytes": "2000"
},
{
"name": "Shell",
"bytes": "39112"
},
{
"name": "XSLT",
"bytes": "2222"
}
],
"symlink_target": ""
}
|
from rsqueakvm import constants, storage_classes
from rsqueakvm.model.numeric import W_Float, W_SmallInteger, W_LargeIntegerWord, W_LargeIntegerBig
from rsqueakvm.model.variable import W_BytesObject
from .util import read_image, open_reader, copy_to_module, cleanup_module, InterpreterForTest, slow_test, very_slow_test
def setup_module():
    """Load mini.image once and publish space/interp/w/perform as module
    globals (via copy_to_module) for every test in this file."""
    space, interp, _, _ = read_image("mini.image")
    w = space.w

    def perform_wrapper(receiver, selector, *args):
        # Accept either a plain string selector or an already wrapped one.
        w_selector = None if isinstance(selector, str) else selector
        return interp.perform(receiver, selector, w_selector, list(args))

    perform = perform_wrapper
    # Inject the locals above as importable names of this test module.
    copy_to_module(locals(), __name__)
    space.simulate_numeric_primitives.activate()
def teardown_module():
    """Drop the module globals installed by setup_module."""
    cleanup_module(__name__)
# ------ tests ------------------------------------------
def test_load_image():
    """Smoke test: loading mini.image in setup_module is itself the test."""
    pass
@very_slow_test
def test_make_new_class():
    """Compile and run a method that defines a new Object subclass, then
    check the resulting class shadow (name and one instance variable)."""
    sourcecode = """makeNewClass
        ^ Object
            subclass: #MySubForm
            instanceVariableNames: 'clippingBox '
            classVariableNames: 'ScreenSave '
            poolDictionaries: ''
            category: 'Graphics-Display Objects'"""
    perform(w(0).getclass(space), "compile:classified:notifying:", w(sourcecode), w('pypy'), w(None))
    w_res = perform(w(0), "makeNewClass")
    assert isinstance(w_res.strategy, storage_classes.ClassShadow)
    assert w_res.strategy.name == "MySubForm"
    assert w_res.strategy._instance_size == 1
@very_slow_test
def test_change_class_layout():
    """Redefine a subclass of MessageSet with extra instance variables and
    check the updated class shadow's name and instance size."""
    sourcecode = """makeChangedClass
        ^ MessageSet subclass: #ChangedMessageSet
            instanceVariableNames: 'changeSet uselessVar'
            classVariableNames: ''
            poolDictionaries: ''
            category: 'Interface-Browser'
    """
    perform(w(0).getclass(space), "compile:classified:notifying:", w(sourcecode), w('pypy'), w(None))
    w_res = perform(w(0), "makeChangedClass")
    assert w_res.strategy.name == "ChangedMessageSet"
    assert w_res.strategy._instance_size == 15
@very_slow_test
def test_become_one_way():
    """One-way become (primitive 72): after forwarding, every reference to
    the old object - including the forwarding arrays and the captured
    temporaries - must point at the new object."""
    sourcecode = """objectsForwardIdentityTo: to
        <primitive: 72>"""
    perform(space.w_Array, "compile:classified:notifying:", w(sourcecode), w('pypy'), w(None))
    sourcecode = """doIt
        | from to oldthing newthing |
        Object subclass: #OldThing
            instanceVariableNames: ''
            classVariableNames: ' '
            poolDictionaries: ''
            category: 'Pypy'.
        Object subclass: #NewThing
            instanceVariableNames: 'otherThing'
            classVariableNames: ''
            poolDictionaries: ''
            category: 'Pypy'.
        oldthing := (Smalltalk at: #OldThing) new.
        newthing := (Smalltalk at: #NewThing) new.
        newthing instVarAt: 1 put: oldthing.
        from := Array with: oldthing.
        to := Array with: newthing.
        from objectsForwardIdentityTo: to.
        ^ Array with: (from at: 1) with: (to at: 1) with: oldthing with: newthing
    """
    perform(w(0).getclass(space), "compile:classified:notifying:", w(sourcecode), w('pypy'), w(None))
    res_w = space.unwrap_array(perform(w(0), "doIt"))
    assert res_w[0].class_shadow(space).name == "NewThing"
    # The new object's first ivar (which held oldthing) now points at itself.
    assert res_w[0].fetch(space, 0) is res_w[0]
    assert res_w[0] is res_w[1]
    assert res_w[0] is res_w[2]
    assert res_w[0] is res_w[3]
def test_compile_method():
    """Compile a recursive #fib on SmallInteger's class and execute it."""
    sourcecode = """fib
        ^self < 2
            ifTrue: [ 1 ]
            ifFalse: [ (self - 1) fib + (self - 2) fib ]"""
    perform(w(10).getclass(space), "compile:classified:notifying:", w(sourcecode), w('pypy'), w(None))
    assert perform(w(10), "fib").is_same_object(w(89))
def test_allInstances_in_context():
    """allInstances must find the Fraction created inside the still-active
    context (5/42) - i.e. objects referenced only from the stack."""
    sourcecode = """aFraction
        | a |
        a := 5 asInteger.
        a := a / 42 asInteger.
        ^ Fraction allInstances"""
    perform(w(10).getclass(space), "compile:classified:notifying:", w(sourcecode), w('pypy'), w(None))
    w_result = perform(w(10), "aFraction")
    result_w = space.unwrap_array(w_result)
    assert len(result_w) == 1
    # The single Fraction holds numerator 5 and denominator 42.
    pointers_w = result_w[0].fetch_all(space)
    assert pointers_w[0].value == 5
    assert pointers_w[1].value == 42
def test_become():
    """Two-way become (#become:): the Smalltalk snippet returns 42 only if
    identities and equalities hold both before and after swapping a Point
    with an Array; any other return value encodes the failing step."""
    sourcecode = """
    testBecome
      | p1 p2 a |
      p1 := 1@2.
      p2 := #(3 4 5).
      a := p1 -> p2.
      (1@2 = a key) ifFalse: [^1].
      (#(3 4 5) = a value) ifFalse: [^2].
      (p1 -> p2 = a) ifFalse: [^3].
      (p1 == a key) ifFalse: [^4].
      (p2 == a value) ifFalse: [^5].
      p1 become: p2.
      (1@2 = a value) ifFalse: [^6].
      (3 = (a key at: 1)) ifFalse: [^7].
      (4 = (a key at: 2)) ifFalse: [^8].
      (5 = (a key at: 3)) ifFalse: [^9].
      (p1 -> p2 = a) ifFalse: [^10].
      (p1 == a key) ifFalse: [^11].
      (p2 == a value) ifFalse: [^12].
      ^42"""
    perform(w(10).getclass(space), "compile:classified:notifying:", w(sourcecode), w('pypy'), w(None))
    w_result = perform(w(10), "testBecome")
    assert space.unwrap_int(w_result) == 42
def test_cached_methoddict():
    """Recompiling a method must invalidate the method cache: the first
    #fib variant adds 1 per recursive step (fib(5) = 15), the plain
    variant compiled afterwards must take effect (fib(10) = 89)."""
    sourcecode = """fib
        ^self < 2
            ifTrue: [ 1 ]
            ifFalse: [ ((self - 1) fib + (self - 2) fib) + 1 ]"""
    perform(w(10).getclass(space), "compile:classified:notifying:", w(sourcecode), w('pypy'), w(None))
    assert perform(w(5), "fib").is_same_object(w(15))
    sourcecode = """fib
        ^self < 2
            ifTrue: [ 1 ]
            ifFalse: [ (self - 1) fib + (self - 2) fib ]"""
    perform(w(10).getclass(space), "compile:classified:notifying:", w(sourcecode), w('pypy'), w(None))
    assert perform(w(10), "fib").is_same_object(w(89))
def test_compiling_float():
    """A float literal in compiled code must come back as W_Float."""
    sourcecode = """aFloat
        ^ 1.1"""
    perform(w(10).getclass(space), "compile:classified:notifying:", w(sourcecode), w('pypy'), w(None))
    w_result = perform(w(10), "aFloat")
    assert isinstance(w_result, W_Float)
    assert w_result.value == 1.1
def test_compiling_32bit_positive_integer():
    """16rFFFFFFFF: a word-sized large integer on 32-bit hosts, but a
    plain SmallInteger on 64-bit hosts."""
    sourcecode = """aLargeInteger
        ^ 16rFFFFFFFF"""
    perform(w(10).getclass(space), "compile:classified:notifying:", w(sourcecode), w('pypy'), w(None))
    w_result = perform(w(10), "aLargeInteger")
    if not constants.IS_64BIT:
        assert isinstance(w_result, W_LargeIntegerWord)
    else:
        assert isinstance(w_result, W_SmallInteger)
def test_compiling_64bit_positive_integer():
    """16rFFFFFFFFFFFFFFFF: arbitrary-precision on 32-bit hosts,
    word-sized large integer on 64-bit hosts; value preserved either way."""
    sourcecode = """aLargeInteger
        ^ 16rFFFFFFFFFFFFFFFF"""
    perform(w(10).getclass(space), "compile:classified:notifying:", w(sourcecode), w('pypy'), w(None))
    w_result = perform(w(10), "aLargeInteger")
    if not constants.IS_64BIT:
        assert isinstance(w_result, W_LargeIntegerBig)
    else:
        assert isinstance(w_result, W_LargeIntegerWord)
    assert w_result.unwrap_long_untranslated(space) == 0xFFFFFFFFFFFFFFFF
def test_compiling_128bit_positive_integer():
    """A 128-bit literal always needs the arbitrary-precision class."""
    sourcecode = """aLargeInteger
        ^ 16rFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF"""
    perform(w(10).getclass(space), "compile:classified:notifying:", w(sourcecode), w('pypy'), w(None))
    w_result = perform(w(10), "aLargeInteger")
    assert isinstance(w_result, W_LargeIntegerBig)
    assert w_result.unwrap_long_untranslated(space) == 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF
def test_simulate_numericprim():
    """An absent numeric primitive (98) must be routed to the image-side
    simulatePrimitive:args: hook instead of the Smalltalk fallback code."""
    sourcecode = """absentPrimitive: anInt with: anotherInt
        <primitive: 98>
        ^'numeric fallback for ', anInt asString, ' ', anotherInt asString"""
    perform(w(10).getclass(space), "compile:classified:notifying:", w(sourcecode), w('pypy'), w(None))
    sourcecode = """simulatePrimitive: aPrimitive args: args
        ^'numeric simulation for ', args first asString, ' ', args second asString"""
    w_sim = perform(w(10).getclass(space), "compile:classified:notifying:", w(sourcecode), w('pypy'), w(None))
    # XXX the lookup for that selector is static so the simulation lookup would be failing
    interp.image.w_simulatePrimitive = w_sim
    w_result = perform(w(10), "absentPrimitive:with:", w(3), w(4))
    assert isinstance(w_result, W_BytesObject)
    assert w_result.unwrap_string(space) == 'numeric simulation for 3 4'
def test_simulate_numericprim_fallback():
    """When the simulation itself signals failure (meta primitive 255),
    execution must fall back to the Smalltalk code with the error code
    bound to the method's first temporary (errorCode)."""
    sourcecode = """absentPrimitive: anInt with: anotherInt
        |errorCode|
        <primitive: 98> "error: errorCode> is not implemented in the mini.image yet"
        ^'numeric fallback for ', anInt asString, ' ', anotherInt asString, ' because of ', errorCode asString"""
    perform(w(10).getclass(space), "compile:classified:notifying:", w(sourcecode), w('pypy'), w(None))
    sourcecode = """metaPrimFailed: errorCode
        <primitive: 255>"""
    perform(w(10).getclass(space), "compile:classified:notifying:", w(sourcecode), w('pypy'), w(None))
    sourcecode = """simulatePrimitive: aPrimitive args: args
        ^self metaPrimFailed: 123"""
    w_sim = perform(w(10).getclass(space), "compile:classified:notifying:", w(sourcecode), w('pypy'), w(None))
    # XXX the lookup for that selector is static so the simulation lookup would be failing
    interp.image.w_simulatePrimitive = w_sim
    w_result = perform(w(10), "absentPrimitive:with:", w(3), w(4))
    assert isinstance(w_result, W_BytesObject)
    assert w_result.unwrap_string(space) == 'numeric fallback for 3 4 because of 123'
def test_simulate_externalcall():
    """An external-call primitive (117) to a missing plugin must also be
    dispatched to the simulatePrimitive:args: hook."""
    sourcecode = """absentPrimitive: anInt with: anotherInt
        | externalCallTarget |
        "do not use <primitive: 'primitiveSimulation' module: 'MyPlugin'> as mini.image doesn't have that yet"
        <primitive: 117>
        externalCallTarget := #(MyPlugin primitiveSimulation).
        ^'externalcall fallback for ', anInt asString, ' ', anotherInt asString"""
    perform(w(10).getclass(space), "compile:classified:notifying:", w(sourcecode), w('pypy'), w(None))
    sourcecode = """simulatePrimitive: aPrimitive args: args
        ^'externalcall simulation for ', args first asString, ' ', args second asString"""
    w_sim = perform(w(10).getclass(space), "compile:classified:notifying:", w(sourcecode), w('pypy'), w(None))
    # XXX the lookup for that selector is static so the simulation lookup would be failing
    interp.image.w_simulatePrimitive = w_sim
    w_result = perform(w(10), "absentPrimitive:with:", w(3), w(4))
    assert isinstance(w_result, W_BytesObject)
    assert w_result.unwrap_string(space) == 'externalcall simulation for 3 4'
def test_snapshotPrimitive(tmpdir):
    """Snapshot the image to a temp file and reload it, checking that key
    special objects round-trip.

    Uses a freshly loaded image (not the module-global one) so the
    snapshot does not disturb the other tests.
    """
    newname = str(tmpdir.join("test_snapshot.image"))
    space, interp, _, _ = read_image("mini.image")

    def perform(receiver, selector, *args):
        # Local shadow of the module-level helper, bound to this interp.
        w_selector = None if isinstance(selector, str) else selector
        return interp.perform(receiver, selector, w_selector, list(args))

    space.simulate_numeric_primitives.activate()
    space.set_system_attribute(constants.SYSTEM_ATTRIBUTE_IMAGE_NAME_INDEX, newname)
    # snapshotPrimitive returns false in the resumed (snapshotting) run.
    w_result = perform(space.w_smalltalkdict, "snapshotPrimitive")
    assert w_result is space.w_false
    space2, interp2, image2, reader2 = read_image(newname)
    for f, n in {
        'w_true': 'True', 'w_false': 'False', 'w_nil': 'UndefinedObject'
    }.iteritems():
        # NOTE(review): the second as_class_get_shadow uses `space`, not
        # `space2` -- looks tolerated by the API, but verify intent.
        assert getattr(space, f).getclass(space).as_class_get_shadow(space).name == getattr(space2, f).getclass(space2).as_class_get_shadow(space).name
    for f in [
        'w_doesNotUnderstand',
        'w_mustBeBoolean'
    ]:
        assert space.unwrap_string(getattr(space, f)) == space2.unwrap_string(getattr(space2, f))
def test_convert_words_to_bytes():
    """primitiveChangeClassTo: (115) reinterprets a Bitmap word as bytes;
    reading the first byte of 16r01020304 detects host endianness (the
    test asserts the little-endian result)."""
    sourcecode = """primitiveChangeClassTo: anObject
        <primitive: 115>
    """
    w_s = perform(space.w_Bitmap, "compile:classified:notifying:", w(sourcecode), w('pypy'), w(None))
    sourcecode = """calcEndianness
        | wordThenBytes |
        wordThenBytes := Bitmap with: 16r01020304.
        wordThenBytes primitiveChangeClassTo: ByteArray basicNew.
        wordThenBytes first = 4 ifTrue: [^ #little].
        ^ #big"""
    w_s = perform(w(10).getclass(space), "compile:classified:notifying:", w(sourcecode), w('pypy'), w(None))
    assert perform(w(5), w_s).unwrap_string(space) == "little"
|
{
"content_hash": "1048dbc1f4a48028dd8217cd301c9a2b",
"timestamp": "",
"source": "github",
"line_count": 279,
"max_line_length": 151,
"avg_line_length": 44.204301075268816,
"alnum_prop": 0.6256385307711019,
"repo_name": "HPI-SWA-Lab/RSqueak",
"id": "c10c1827be5cceca8a82262f197e5605a1bd7e2f",
"size": "12333",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rsqueakvm/test/test_miniimage_compiling.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "1638"
},
{
"name": "C",
"bytes": "115644"
},
{
"name": "HTML",
"bytes": "4754"
},
{
"name": "PowerShell",
"bytes": "1691"
},
{
"name": "Python",
"bytes": "1140634"
},
{
"name": "Shell",
"bytes": "18715"
},
{
"name": "Smalltalk",
"bytes": "71208"
}
],
"symlink_target": ""
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.