| code | repo_name | path | language | license | size |
|---|---|---|---|---|---|
#!/usr/bin/env python3
"""Play a web stream.
ffmpeg-python (https://github.com/kkroening/ffmpeg-python) has to be installed.
If you don't know a stream URL, try http://icecast.spc.org:8000/longplayer
(see https://longplayer.org/ for a description).
"""
import argparse
import queue
import sys
import ffmpeg
import sounddevice as sd
def int_or_str(text):
"""Helper function for argument parsing."""
try:
return int(text)
except ValueError:
return text
parser = argparse.ArgumentParser(add_help=False)
parser.add_argument(
'-l', '--list-devices', action='store_true',
help='show list of audio devices and exit')
args, remaining = parser.parse_known_args()
if args.list_devices:
print(sd.query_devices())
parser.exit(0)
parser = argparse.ArgumentParser(
description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter,
parents=[parser])
parser.add_argument(
'url', metavar='URL',
help='stream URL')
parser.add_argument(
'-d', '--device', type=int_or_str,
help='output device (numeric ID or substring)')
parser.add_argument(
'-b', '--blocksize', type=int, default=1024,
help='block size (default: %(default)s)')
parser.add_argument(
'-q', '--buffersize', type=int, default=20,
help='number of blocks used for buffering (default: %(default)s)')
args = parser.parse_args(remaining)
if args.blocksize == 0:
parser.error('blocksize must not be zero')
if args.buffersize < 1:
parser.error('buffersize must be at least 1')
q = queue.Queue(maxsize=args.buffersize)
print('Getting stream information ...')
try:
info = ffmpeg.probe(args.url)
except ffmpeg.Error as e:
sys.stderr.buffer.write(e.stderr)
parser.exit(e)
streams = info.get('streams', [])
if len(streams) != 1:
parser.exit('There must be exactly one stream available')
stream = streams[0]
if stream.get('codec_type') != 'audio':
parser.exit('The stream must be an audio stream')
channels = stream['channels']
samplerate = float(stream['sample_rate'])
def callback(outdata, frames, time, status):
assert frames == args.blocksize
if status.output_underflow:
print('Output underflow: increase blocksize?', file=sys.stderr)
raise sd.CallbackAbort
assert not status
try:
data = q.get_nowait()
except queue.Empty as e:
print('Buffer is empty: increase buffersize?', file=sys.stderr)
raise sd.CallbackAbort from e
assert len(data) == len(outdata)
outdata[:] = data
try:
print('Opening stream ...')
process = ffmpeg.input(
args.url
).output(
'pipe:',
format='f32le',
acodec='pcm_f32le',
ac=channels,
ar=samplerate,
loglevel='quiet',
).run_async(pipe_stdout=True)
stream = sd.RawOutputStream(
samplerate=samplerate, blocksize=args.blocksize,
device=args.device, channels=channels, dtype='float32',
callback=callback)
read_size = args.blocksize * channels * stream.samplesize
print('Buffering ...')
for _ in range(args.buffersize):
q.put_nowait(process.stdout.read(read_size))
print('Starting Playback ...')
with stream:
timeout = args.blocksize * args.buffersize / samplerate
while True:
q.put(process.stdout.read(read_size), timeout=timeout)
except KeyboardInterrupt:
parser.exit('\nInterrupted by user')
except queue.Full:
# A timeout occurred, i.e. there was an error in the callback
parser.exit(1)
except Exception as e:
parser.exit(type(e).__name__ + ': ' + str(e))
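# Hedged usage sketch (editor addition, not part of the original example).
# The script is driven from the command line; the device ID below is
# hypothetical, the URL is the Longplayer stream from the docstring:
#
#   python3 play_stream.py http://icecast.spc.org:8000/longplayer
#   python3 play_stream.py -d 5 -b 2048 -q 40 http://icecast.spc.org:8000/longplayer
#
# Sizing note: each queue entry holds one callback block of raw float32 PCM,
# so read_size = blocksize * channels * 4 bytes per sample, and the put()
# timeout of blocksize * buffersize / samplerate seconds is the time the
# full queue takes to drain.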
| spatialaudio/python-sounddevice | examples/play_stream.py | Python | mit | 3,596 |
# Compute grades using real division, with no integer truncation
from __future__ import division
import json
import logging
import random
from collections import defaultdict
from functools import partial
import dogstats_wrapper as dog_stats_api
from django.conf import settings
from django.core.cache import cache
from django.test.client import RequestFactory
from opaque_keys import InvalidKeyError
from opaque_keys.edx.keys import CourseKey
from opaque_keys.edx.locator import BlockUsageLocator
from openedx.core.lib.gating import api as gating_api
from courseware import courses
from courseware.access import has_access
from courseware.model_data import FieldDataCache, ScoresClient
from openedx.core.djangoapps.signals.signals import GRADES_UPDATED
from student.models import anonymous_id_for_user
from util.db import outer_atomic
from util.module_utils import yield_dynamic_descriptor_descendants
from xmodule import graders
from xmodule.graders import Score
from xmodule.modulestore.django import modulestore
from xmodule.modulestore.exceptions import ItemNotFoundError
from .models import StudentModule
from .module_render import get_module_for_descriptor
log = logging.getLogger("edx.courseware")
class MaxScoresCache(object):
"""
A cache for unweighted max scores for problems.
The key assumption here is that any problem that has not yet recorded a
score for a user is worth the same number of points. An XBlock is free to
score one student at 2/5 and another at 1/3. But a problem that has never
issued a score -- say a problem two students have only seen mentioned in
their progress pages and never interacted with -- should be worth the same
number of points for everyone.
"""
def __init__(self, cache_prefix):
self.cache_prefix = cache_prefix
self._max_scores_cache = {}
self._max_scores_updates = {}
@classmethod
def create_for_course(cls, course):
"""
Given a CourseDescriptor, return a correctly configured `MaxScoresCache`
This method will base the `MaxScoresCache` cache prefix value on the
last time something was published to the live version of the course.
This is so that we don't have to worry about stale cached values for
max scores -- any time a content change occurs, we change our cache
keys.
"""
if course.subtree_edited_on is None:
            # check for subtree_edited_on because old XML courses don't have this attribute
cache_key = u"{}".format(course.id)
else:
cache_key = u"{}.{}".format(course.id, course.subtree_edited_on.isoformat())
return cls(cache_key)
def fetch_from_remote(self, locations):
"""
Populate the local cache with values from django's cache
"""
remote_dict = cache.get_many([self._remote_cache_key(loc) for loc in locations])
self._max_scores_cache = {
self._local_cache_key(remote_key): value
for remote_key, value in remote_dict.items()
if value is not None
}
def push_to_remote(self):
"""
Update the remote cache
"""
if self._max_scores_updates:
cache.set_many(
{
self._remote_cache_key(key): value
for key, value in self._max_scores_updates.items()
},
60 * 60 * 24 # 1 day
)
def _remote_cache_key(self, location):
"""Convert a location to a remote cache key (add our prefixing)."""
return u"grades.MaxScores.{}___{}".format(self.cache_prefix, unicode(location))
def _local_cache_key(self, remote_key):
"""Convert a remote cache key to a local cache key (i.e. location str)."""
return remote_key.split(u"___", 1)[1]
def num_cached_from_remote(self):
"""How many items did we pull down from the remote cache?"""
return len(self._max_scores_cache)
def num_cached_updates(self):
"""How many local updates are we waiting to push to the remote cache?"""
return len(self._max_scores_updates)
def set(self, location, max_score):
"""
Adds a max score to the max_score_cache
"""
loc_str = unicode(location)
if self._max_scores_cache.get(loc_str) != max_score:
self._max_scores_updates[loc_str] = max_score
def get(self, location):
"""
Retrieve a max score from the cache
"""
loc_str = unicode(location)
max_score = self._max_scores_updates.get(loc_str)
if max_score is None:
max_score = self._max_scores_cache.get(loc_str)
return max_score
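    # Hedged illustration (editor addition) of the key scheme above, with a
    # hypothetical prefix and location: a location 'block@p1' under prefix
    # 'course-v1:Org+Course+Run.2016-01-01T00:00:00' maps to the remote key
    #   u"grades.MaxScores.course-v1:Org+Course+Run.2016-01-01T00:00:00___block@p1"
    # and _local_cache_key() recovers 'block@p1' by splitting on the first '___'.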
class ProgressSummary(object):
"""
Wrapper class for the computation of a user's scores across a course.
Attributes
chapters: a summary of all sections with problems in the course. It is
organized as an array of chapters, each containing an array of sections,
each containing an array of scores. This contains information for graded
and ungraded problems, and is good for displaying a course summary with
due dates, etc.
weighted_scores: a dictionary mapping module locations to weighted Score
objects.
locations_to_children: a dictionary mapping module locations to their
direct descendants.
"""
def __init__(self, chapters, weighted_scores, locations_to_children):
self.chapters = chapters
self.weighted_scores = weighted_scores
self.locations_to_children = locations_to_children
def score_for_module(self, location):
"""
Calculate the aggregate weighted score for any location in the course.
This method returns a tuple containing (earned_score, possible_score).
If the location is of 'problem' type, this method will return the
possible and earned scores for that problem. If the location refers to a
        composite module (a vertical or section), the scores will be the sums of
all scored problems that are children of the chosen location.
"""
if location in self.weighted_scores:
score = self.weighted_scores[location]
return score.earned, score.possible
children = self.locations_to_children[location]
earned = 0.0
possible = 0.0
for child in children:
child_earned, child_possible = self.score_for_module(child)
earned += child_earned
possible += child_possible
return earned, possible
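    # Hedged illustration (editor addition): score_for_module recurses through
    # locations_to_children and sums the leaf scores. A toy example with
    # hypothetical locations, assuming Score(earned, possible, graded, section,
    # module_id) as imported above:
    #
    #   leaf = Score(2.0, 5.0, True, 'Problem 1', 'p1')
    #   summary = ProgressSummary(
    #       chapters=[],
    #       weighted_scores={'p1': leaf},
    #       locations_to_children={'sec': ['p1']},
    #   )
    #   summary.score_for_module('sec')  # -> (2.0, 5.0)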
def descriptor_affects_grading(block_types_affecting_grading, descriptor):
"""
Returns True if the descriptor could have any impact on grading, else False.
Something might be a scored item if it is capable of storing a score
(has_score=True). We also have to include anything that can have children,
since those children might have scores. We can avoid things like Videos,
which have state but cannot ever impact someone's grade.
"""
return descriptor.location.block_type in block_types_affecting_grading
def field_data_cache_for_grading(course, user):
"""
Given a CourseDescriptor and User, create the FieldDataCache for grading.
This will generate a FieldDataCache that only loads state for those things
that might possibly affect the grading process, and will ignore things like
Videos.
"""
descriptor_filter = partial(descriptor_affects_grading, course.block_types_affecting_grading)
return FieldDataCache.cache_for_descriptor_descendents(
course.id,
user,
course,
depth=None,
descriptor_filter=descriptor_filter
)
def answer_distributions(course_key):
"""
Given a course_key, return answer distributions in the form of a dictionary
mapping:
(problem url_name, problem display_name, problem_id) -> {dict: answer -> count}
Answer distributions are found by iterating through all StudentModule
entries for a given course with type="problem" and a grade that is not null.
This means that we only count LoncapaProblems that people have submitted.
Other types of items like ORA or sequences will not be collected. Empty
    Loncapa problem state that gets created from running the progress page is
also not counted.
This method accesses the StudentModule table directly instead of using the
CapaModule abstraction. The main reason for this is so that we can generate
the report without any side-effects -- we don't have to worry about answer
distribution potentially causing re-evaluation of the student answer. This
also allows us to use the read-replica database, which reduces risk of bad
locking behavior. And quite frankly, it makes this a lot less confusing.
Also, we're pulling all available records from the database for this course
rather than crawling through a student's course-tree -- the latter could
potentially cause us trouble with A/B testing. The distribution report may
not be aware of problems that are not visible to the user being used to
generate the report.
This method will try to use a read-replica database if one is available.
"""
# dict: { module.module_state_key : (url_name, display_name) }
state_keys_to_problem_info = {} # For caching, used by url_and_display_name
def url_and_display_name(usage_key):
"""
For a given usage_key, return the problem's url and display_name.
Handle modulestore access and caching. This method ignores permissions.
Raises:
InvalidKeyError: if the usage_key does not parse
ItemNotFoundError: if there is no content that corresponds
to this usage_key.
"""
problem_store = modulestore()
if usage_key not in state_keys_to_problem_info:
problem = problem_store.get_item(usage_key)
problem_info = (problem.url_name, problem.display_name_with_default_escaped)
state_keys_to_problem_info[usage_key] = problem_info
return state_keys_to_problem_info[usage_key]
# Iterate through all problems submitted for this course in no particular
# order, and build up our answer_counts dict that we will eventually return
answer_counts = defaultdict(lambda: defaultdict(int))
for module in StudentModule.all_submitted_problems_read_only(course_key):
try:
state_dict = json.loads(module.state) if module.state else {}
raw_answers = state_dict.get("student_answers", {})
except ValueError:
log.error(
u"Answer Distribution: Could not parse module state for StudentModule id=%s, course=%s",
module.id,
course_key,
)
continue
try:
url, display_name = url_and_display_name(module.module_state_key.map_into_course(course_key))
# Each problem part has an ID that is derived from the
# module.module_state_key (with some suffix appended)
for problem_part_id, raw_answer in raw_answers.items():
# Convert whatever raw answers we have (numbers, unicode, None, etc.)
# to be unicode values. Note that if we get a string, it's always
# unicode and not str -- state comes from the json decoder, and that
# always returns unicode for strings.
answer = unicode(raw_answer)
answer_counts[(url, display_name, problem_part_id)][answer] += 1
except (ItemNotFoundError, InvalidKeyError):
msg = (
"Answer Distribution: Item {} referenced in StudentModule {} " +
"for user {} in course {} not found; " +
"This can happen if a student answered a question that " +
"was later deleted from the course. This answer will be " +
"omitted from the answer distribution CSV."
).format(
module.module_state_key, module.id, module.student_id, course_key
)
log.warning(msg)
continue
return answer_counts
def grade(student, request, course, keep_raw_scores=False, field_data_cache=None, scores_client=None):
"""
Returns the grade of the student.
Also sends a signal to update the minimum grade requirement status.
"""
grade_summary = _grade(student, request, course, keep_raw_scores, field_data_cache, scores_client)
responses = GRADES_UPDATED.send_robust(
sender=None,
username=student.username,
grade_summary=grade_summary,
course_key=course.id,
deadline=course.end
)
for receiver, response in responses:
log.info('Signal fired when student grade is calculated. Receiver: %s. Response: %s', receiver, response)
return grade_summary
def _grade(student, request, course, keep_raw_scores, field_data_cache, scores_client):
"""
Unwrapped version of "grade"
This grades a student as quickly as possible. It returns the
output from the course grader, augmented with the final letter
grade. The keys in the output are:
course: a CourseDescriptor
- grade : A final letter grade.
- percent : The final percent for the class (rounded up).
- section_breakdown : A breakdown of each section that makes
up the grade. (For display)
- grade_breakdown : A breakdown of the major components that
make up the final grade. (For display)
- keep_raw_scores : if True, then value for key 'raw_scores' contains scores
for every graded module
More information on the format is in the docstring for CourseGrader.
"""
with outer_atomic():
if field_data_cache is None:
field_data_cache = field_data_cache_for_grading(course, student)
if scores_client is None:
scores_client = ScoresClient.from_field_data_cache(field_data_cache)
# Dict of item_ids -> (earned, possible) point tuples. This *only* grabs
# scores that were registered with the submissions API, which for the moment
# means only openassessment (edx-ora2)
# We need to import this here to avoid a circular dependency of the form:
# XBlock --> submissions --> Django Rest Framework error strings -->
# Django translation --> ... --> courseware --> submissions
from submissions import api as sub_api # installed from the edx-submissions repository
with outer_atomic():
submissions_scores = sub_api.get_scores(
course.id.to_deprecated_string(),
anonymous_id_for_user(student, course.id)
)
max_scores_cache = MaxScoresCache.create_for_course(course)
# For the moment, we have to get scorable_locations from field_data_cache
# and not from scores_client, because scores_client is ignorant of things
# in the submissions API. As a further refactoring step, submissions should
# be hidden behind the ScoresClient.
max_scores_cache.fetch_from_remote(field_data_cache.scorable_locations)
grading_context = course.grading_context
raw_scores = []
totaled_scores = {}
# This next complicated loop is just to collect the totaled_scores, which is
# passed to the grader
for section_format, sections in grading_context['graded_sections'].iteritems():
format_scores = []
for section in sections:
section_descriptor = section['section_descriptor']
section_name = section_descriptor.display_name_with_default_escaped
with outer_atomic():
# some problems have state that is updated independently of interaction
# with the LMS, so they need to always be scored. (E.g. combinedopenended ORA1)
# TODO This block is causing extra savepoints to be fired that are empty because no queries are executed
# during the loop. When refactoring this code please keep this outer_atomic call in mind and ensure we
# are not making unnecessary database queries.
should_grade_section = any(
descriptor.always_recalculate_grades for descriptor in section['xmoduledescriptors']
)
# If there are no problems that always have to be regraded, check to
# see if any of our locations are in the scores from the submissions
# API. If scores exist, we have to calculate grades for this section.
if not should_grade_section:
should_grade_section = any(
descriptor.location.to_deprecated_string() in submissions_scores
for descriptor in section['xmoduledescriptors']
)
if not should_grade_section:
should_grade_section = any(
descriptor.location in scores_client
for descriptor in section['xmoduledescriptors']
)
# If we haven't seen a single problem in the section, we don't have
# to grade it at all! We can assume 0%
if should_grade_section:
scores = []
def create_module(descriptor):
'''creates an XModule instance given a descriptor'''
# TODO: We need the request to pass into here. If we could forego that, our arguments
# would be simpler
return get_module_for_descriptor(
student, request, descriptor, field_data_cache, course.id, course=course
)
descendants = yield_dynamic_descriptor_descendants(section_descriptor, student.id, create_module)
for module_descriptor in descendants:
user_access = has_access(
student, 'load', module_descriptor, module_descriptor.location.course_key
)
if not user_access:
continue
(correct, total) = get_score(
student,
module_descriptor,
create_module,
scores_client,
submissions_scores,
max_scores_cache,
)
if correct is None and total is None:
continue
if settings.GENERATE_PROFILE_SCORES: # for debugging!
if total > 1:
correct = random.randrange(max(total - 2, 1), total + 1)
else:
correct = total
graded = module_descriptor.graded
if not total > 0:
# We simply cannot grade a problem that is 12/0, because we might need it as a percentage
graded = False
scores.append(
Score(
correct,
total,
graded,
module_descriptor.display_name_with_default_escaped,
module_descriptor.location
)
)
__, graded_total = graders.aggregate_scores(scores, section_name)
if keep_raw_scores:
raw_scores += scores
else:
graded_total = Score(0.0, 1.0, True, section_name, None)
#Add the graded total to totaled_scores
if graded_total.possible > 0:
format_scores.append(graded_total)
else:
log.info(
"Unable to grade a section with a total possible score of zero. " +
str(section_descriptor.location)
)
totaled_scores[section_format] = format_scores
with outer_atomic():
        # Grading policy might be overridden by a CCX, need to reset it
course.set_grading_policy(course.grading_policy)
grade_summary = course.grader.grade(totaled_scores, generate_random_scores=settings.GENERATE_PROFILE_SCORES)
        # We round the grade here, to make sure that the grade is a whole
        # percentage and doesn't get displayed differently than it gets graded
grade_summary['percent'] = round(grade_summary['percent'] * 100 + 0.05) / 100
letter_grade = grade_for_percentage(course.grade_cutoffs, grade_summary['percent'])
grade_summary['grade'] = letter_grade
grade_summary['totaled_scores'] = totaled_scores # make this available, eg for instructor download & debugging
if keep_raw_scores:
# way to get all RAW scores out to instructor
# so grader can be double-checked
grade_summary['raw_scores'] = raw_scores
max_scores_cache.push_to_remote()
return grade_summary
def grade_for_percentage(grade_cutoffs, percentage):
"""
Returns a letter grade as defined in grading_policy (e.g. 'A' 'B' 'C' for 6.002x) or None.
Arguments
- grade_cutoffs is a dictionary mapping a grade to the lowest
possible percentage to earn that grade.
- percentage is the final percent across all problems in a course
"""
letter_grade = None
# Possible grades, sorted in descending order of score
descending_grades = sorted(grade_cutoffs, key=lambda x: grade_cutoffs[x], reverse=True)
for possible_grade in descending_grades:
if percentage >= grade_cutoffs[possible_grade]:
letter_grade = possible_grade
break
return letter_grade
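# Hedged example (editor addition): with hypothetical cutoffs
# {'A': 0.9, 'B': 0.8, 'Pass': 0.5}, grade_for_percentage returns 'A' for 0.93,
# 'B' for 0.85, 'Pass' for 0.5 (cutoffs are inclusive) and None for 0.49,
# since no cutoff is met.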
def progress_summary(student, request, course, field_data_cache=None, scores_client=None):
"""
Returns progress summary for all chapters in the course.
"""
progress = _progress_summary(student, request, course, field_data_cache, scores_client)
if progress:
return progress.chapters
else:
return None
def get_weighted_scores(student, course, field_data_cache=None, scores_client=None):
"""
    Uses the _progress_summary method to return a ProgressSummary object
    containing details of a student's weighted scores for the course.
"""
request = _get_mock_request(student)
return _progress_summary(student, request, course, field_data_cache, scores_client)
# TODO: This method is not very good. It was written in the old course style and
# then converted over and performance is not good. Once the progress page is redesigned
# to not have the progress summary this method should be deleted (so it won't be copied).
def _progress_summary(student, request, course, field_data_cache=None, scores_client=None):
"""
Unwrapped version of "progress_summary".
This pulls a summary of all problems in the course.
Returns
- courseware_summary is a summary of all sections with problems in the course.
It is organized as an array of chapters, each containing an array of sections,
each containing an array of scores. This contains information for graded and
ungraded problems, and is good for displaying a course summary with due dates,
etc.
Arguments:
student: A User object for the student to grade
course: A Descriptor containing the course to grade
If the student does not have access to load the course module, this function
will return None.
"""
with outer_atomic():
if field_data_cache is None:
field_data_cache = field_data_cache_for_grading(course, student)
if scores_client is None:
scores_client = ScoresClient.from_field_data_cache(field_data_cache)
course_module = get_module_for_descriptor(
student, request, course, field_data_cache, course.id, course=course
)
if not course_module:
return None
course_module = getattr(course_module, '_x_module', course_module)
# We need to import this here to avoid a circular dependency of the form:
# XBlock --> submissions --> Django Rest Framework error strings -->
# Django translation --> ... --> courseware --> submissions
from submissions import api as sub_api # installed from the edx-submissions repository
with outer_atomic():
submissions_scores = sub_api.get_scores(
course.id.to_deprecated_string(), anonymous_id_for_user(student, course.id)
)
max_scores_cache = MaxScoresCache.create_for_course(course)
# For the moment, we have to get scorable_locations from field_data_cache
# and not from scores_client, because scores_client is ignorant of things
# in the submissions API. As a further refactoring step, submissions should
# be hidden behind the ScoresClient.
max_scores_cache.fetch_from_remote(field_data_cache.scorable_locations)
# Check for gated content
gated_content = gating_api.get_gated_content(course, student)
chapters = []
locations_to_children = defaultdict(list)
locations_to_weighted_scores = {}
# Don't include chapters that aren't displayable (e.g. due to error)
for chapter_module in course_module.get_display_items():
# Skip if the chapter is hidden
if chapter_module.hide_from_toc:
continue
sections = []
for section_module in chapter_module.get_display_items():
# Skip if the section is hidden
with outer_atomic():
if section_module.hide_from_toc or unicode(section_module.location) in gated_content:
continue
graded = section_module.graded
scores = []
module_creator = section_module.xmodule_runtime.get_module
for module_descriptor in yield_dynamic_descriptor_descendants(
section_module, student.id, module_creator
):
location_parent = module_descriptor.parent.replace(version=None, branch=None)
location_to_save = module_descriptor.location.replace(version=None, branch=None)
locations_to_children[location_parent].append(location_to_save)
(correct, total) = get_score(
student,
module_descriptor,
module_creator,
scores_client,
submissions_scores,
max_scores_cache,
)
if correct is None and total is None:
continue
weighted_location_score = Score(
correct,
total,
graded,
module_descriptor.display_name_with_default_escaped,
module_descriptor.location
)
scores.append(weighted_location_score)
locations_to_weighted_scores[location_to_save] = weighted_location_score
scores.reverse()
section_total, _ = graders.aggregate_scores(
scores, section_module.display_name_with_default_escaped)
module_format = section_module.format if section_module.format is not None else ''
sections.append({
'display_name': section_module.display_name_with_default_escaped,
'url_name': section_module.url_name,
'scores': scores,
'section_total': section_total,
'format': module_format,
'due': section_module.due,
'graded': graded,
})
chapters.append({
'course': course.display_name_with_default_escaped,
'display_name': chapter_module.display_name_with_default_escaped,
'url_name': chapter_module.url_name,
'sections': sections
})
max_scores_cache.push_to_remote()
return ProgressSummary(chapters, locations_to_weighted_scores, locations_to_children)
def weighted_score(raw_correct, raw_total, weight):
"""Return a tuple that represents the weighted (correct, total) score."""
# If there is no weighting, or weighting can't be applied, return input.
if weight is None or raw_total == 0:
return (raw_correct, raw_total)
return (float(raw_correct) * weight / raw_total, float(weight))
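# Hedged example (editor addition): weighted_score(5, 7, 10) rescales a raw
# 5/7 to a 10-point weight, giving (5.0 * 10 / 7, 10.0) ~= (7.14, 10.0);
# with weight=None or raw_total == 0 the raw pair is returned unchanged.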
def get_score(user, problem_descriptor, module_creator, scores_client, submissions_scores_cache, max_scores_cache):
"""
Return the score for a user on a problem, as a tuple (correct, total).
e.g. (5,7) if you got 5 out of 7 points.
If this problem doesn't have a score, or we couldn't load it, returns (None,
None).
user: a Student object
problem_descriptor: an XModuleDescriptor
scores_client: an initialized ScoresClient
module_creator: a function that takes a descriptor, and returns the corresponding XModule for this user.
Can return None if user doesn't have access, or if something else went wrong.
submissions_scores_cache: A dict of location names to (earned, possible) point tuples.
If an entry is found in this cache, it takes precedence.
max_scores_cache: a MaxScoresCache
"""
submissions_scores_cache = submissions_scores_cache or {}
if not user.is_authenticated():
return (None, None)
location_url = problem_descriptor.location.to_deprecated_string()
if location_url in submissions_scores_cache:
return submissions_scores_cache[location_url]
# some problems have state that is updated independently of interaction
# with the LMS, so they need to always be scored. (E.g. combinedopenended ORA1.)
if problem_descriptor.always_recalculate_grades:
problem = module_creator(problem_descriptor)
if problem is None:
return (None, None)
score = problem.get_score()
if score is not None:
return (score['score'], score['total'])
else:
return (None, None)
if not problem_descriptor.has_score:
# These are not problems, and do not have a score
return (None, None)
# Check the score that comes from the ScoresClient (out of CSM).
# If an entry exists and has a total associated with it, we trust that
# value. This is important for cases where a student might have seen an
# older version of the problem -- they're still graded on what was possible
# when they tried the problem, not what it's worth now.
score = scores_client.get(problem_descriptor.location)
cached_max_score = max_scores_cache.get(problem_descriptor.location)
if score and score.total is not None:
# We have a valid score, just use it.
correct = score.correct if score.correct is not None else 0.0
total = score.total
elif cached_max_score is not None and settings.FEATURES.get("ENABLE_MAX_SCORE_CACHE"):
# We don't have a valid score entry but we know from our cache what the
# max possible score is, so they've earned 0.0 / cached_max_score
correct = 0.0
total = cached_max_score
else:
# This means we don't have a valid score entry and we don't have a
# cached_max_score on hand. We know they've earned 0.0 points on this,
# but we need to instantiate the module (i.e. load student state) in
# order to find out how much it was worth.
problem = module_creator(problem_descriptor)
if problem is None:
return (None, None)
correct = 0.0
total = problem.max_score()
# Problem may be an error module (if something in the problem builder failed)
# In which case total might be None
if total is None:
return (None, None)
else:
# add location to the max score cache
max_scores_cache.set(problem_descriptor.location, total)
return weighted_score(correct, total, problem_descriptor.weight)
def iterate_grades_for(course_or_id, students, keep_raw_scores=False):
"""Given a course_id and an iterable of students (User), yield a tuple of:
(student, gradeset, err_msg) for every student enrolled in the course.
If an error occurred, gradeset will be an empty dict and err_msg will be an
exception message. If there was no error, err_msg is an empty string.
The gradeset is a dictionary with the following fields:
- grade : A final letter grade.
- percent : The final percent for the class (rounded up).
- section_breakdown : A breakdown of each section that makes
up the grade. (For display)
- grade_breakdown : A breakdown of the major components that
make up the final grade. (For display)
- raw_scores: contains scores for every graded module
"""
if isinstance(course_or_id, (basestring, CourseKey)):
course = courses.get_course_by_id(course_or_id)
else:
course = course_or_id
for student in students:
with dog_stats_api.timer('lms.grades.iterate_grades_for', tags=[u'action:{}'.format(course.id)]):
try:
request = _get_mock_request(student)
# Grading calls problem rendering, which calls masquerading,
# which checks session vars -- thus the empty session dict below.
# It's not pretty, but untangling that is currently beyond the
# scope of this feature.
request.session = {}
gradeset = grade(student, request, course, keep_raw_scores)
yield student, gradeset, ""
except Exception as exc: # pylint: disable=broad-except
# Keep marching on even if this student couldn't be graded for
# some reason, but log it for future reference.
log.exception(
'Cannot grade student %s (%s) in course %s because of exception: %s',
student.username,
student.id,
course.id,
exc.message
)
yield student, {}, exc.message
def _get_mock_request(student):
"""
Make a fake request because grading code expects to be able to look at
the request. We have to attach the correct user to the request before
grading that student.
"""
request = RequestFactory().get('/')
request.user = student
return request
def _calculate_score_for_modules(user_id, course, modules):
"""
Calculates the cumulative score (percent) of the given modules
"""
# removing branch and version from exam modules locator
# otherwise student module would not return scores since module usage keys would not match
    modules = list(modules)  # materialize the generator; it is iterated twice below
locations = [
BlockUsageLocator(
course_key=course.id,
block_type=module.location.block_type,
block_id=module.location.block_id
)
if isinstance(module.location, BlockUsageLocator) and module.location.version
else module.location
for module in modules
]
scores_client = ScoresClient(course.id, user_id)
scores_client.fetch_scores(locations)
# Iterate over all of the exam modules to get score percentage of user for each of them
module_percentages = []
ignore_categories = ['course', 'chapter', 'sequential', 'vertical', 'randomize', 'library_content']
for index, module in enumerate(modules):
if module.category not in ignore_categories and (module.graded or module.has_score):
module_score = scores_client.get(locations[index])
if module_score:
correct = module_score.correct or 0
total = module_score.total or 1
module_percentages.append(correct / total)
return sum(module_percentages) / float(len(module_percentages)) if module_percentages else 0
def get_module_score(user, course, module):
"""
Collects all children of the given module and calculates the cumulative
score for this set of modules for the given user.
Arguments:
user (User): The user
course (CourseModule): The course
module (XBlock): The module
Returns:
float: The cumulative score
"""
def inner_get_module(descriptor):
"""
Delegate to get_module_for_descriptor
"""
field_data_cache = FieldDataCache([descriptor], course.id, user)
return get_module_for_descriptor(
user,
_get_mock_request(user),
descriptor,
field_data_cache,
course.id,
course=course
)
modules = yield_dynamic_descriptor_descendants(
module,
user.id,
inner_get_module
)
return _calculate_score_for_modules(user.id, course, modules)
| ampax/edx-platform | lms/djangoapps/courseware/grades.py | Python | agpl-3.0 | 37,488 |
# File: Relation.py
# Date: 20 Jan 16
# Note: adaptation of the relation.py file from Relational
# This module provides classes to represent relations and to perform
# relational operations on them.
from . import Exp, Types
import csv

# Placeholder for missing values produced by the outer joins below.
# Editor assumption: the original Relational library used the string "---",
# as the commented-out lines near item.append(NULL) suggest.
NULL = '---'
class Relation (object):
    '''This object defines a relation (as a group of consistent tuples) and operations on it.
    A relation can be represented using a table.
    Calling an operation and providing a non-relation parameter where one is expected will
    result in a None value'''
__hash__ = None
    # creates a relation from a csv text organized in lines
    # according to the following format (argument text):
    #
    # TabName
    # Attr1, Attr2, ..., Attrn
    # v11, v12, ..., v1n
    # ...
    # vm1, vm2, ..., vmn
    #
    # If text==None an empty relation named 'unnamed' is generated;
    # if text contains a single line (the table name) a relation with that name is generated;
    # if text contains 2 lines, the schema of the relation is also defined (second line);
    # if text contains >2 lines, the data of the extension follow from the third line
    #
def __init__(self, text=None):
'''
Creates a relation.
Empty relations are used in internal operations.
'''
#print('Relation: TEXT =',text)
self._readonly = False
if text != None:
lines = text.split('\n')
nl = len(lines)
if nl>0: name = lines[0]
else:
nl = 0
name = 'unnamed'
self.relname = name # relation name #lc
self.header = Header([])
self.content = set()
        # definition of the schema and of the extension
if nl>1:
a = lines[1].split(',')
            self.header = Header(tuple(a)) # array of attribute names
if nl>2:
for i in range(2,nl-1): # Iterating rows
a = lines[i].split(',')
                    self.content.add(tuple(a)) # USE insert to avoid duplicate rows
def _make_writable(self):
'''If this relation is marked as readonly, this
method will copy the content to make it writable too'''
if self._readonly:
self.content = set(self.content)
self._readonly = False
    # name : internal name to assign to the table
    # path : full path of the file to read
def load(self, name, path):
'''
Loads the relation name from the file specified by path.
        The file is handled as comma separated values, as described in RFC4180.
'''
self.relname = name # relation name #lc
"""
if (len(path) == 0) and (len(name)==0): # Empty relation
self.content = set()
self.header = Header([])
return
"""
# Opening file
#utils.debug("in Relation.__init__ : PATH = "+path)
#utils.debug("in Relation.__init__ : NAME = "+name)
        #fp = open(path) # improved below
try:
fp = open(path)
except Exception as e:
            #print('ERROR OPENING FILE')
#raise Exception('Error in opening file \''+path+'\'')
return(-1,'Error in opening file \''+path+'\'')
reader = csv.reader(fp) # Creating a csv reader
self.header = Header(next(reader)) # read 1st line
self.content = set()
for i in reader.__iter__(): # Iterating rows
            self.content.add(tuple(i)) # USE insert to avoid duplicate rows
# Closing file
fp.close()
return(0,'ok')
def save(self, filename):
'''
        Saves the relation to a file, using the csv format
        as defined in RFC4180.
'''
fp = open(filename, 'w') # opening file
writer = csv.writer(fp) # creating csv writer
# It wants an iterable containing iterables
head = (self.header.attributes,)
writer.writerows(head)
# Writing content, already in the correct format
writer.writerows(self.content)
fp.close() # Closing file
def _rearrange_(self, other):
'''If two relations share the same attributes in a different order, this method
will use projection to make them have the same attributes' order.
        It is not exactly related to relational algebra. Just a method used
internally.
Will return None if they don't share the same attributes'''
if (self.__class__ != other.__class__):
return None
if self.header.sharedAttributes(other.header) == len(self.header.attributes) == len(other.header.attributes):
return other.projection(list(self.header.attributes))
return None
def _autocast(self, string):
'''Depending on the regexp matched by the string,
it will perform automatic casting'''
tmpstring = Types.Rstring(string)
if len(tmpstring) > 0 and tmpstring.isInt():
return int(tmpstring)
elif len(tmpstring) > 0 and tmpstring.isFloat():
return float(tmpstring)
elif len(tmpstring) > 0 and tmpstring.isDate():
return Types.rdate(tmpstring)
else:
return tmpstring
def selection(self, expr):
'''Selection, expr must be a valid boolean expression, can contain field names,
        constants, math operations and boolean ones.'''
attributes = {}
newt = Relation()
newt.header = Header(list(self.header.attributes))
for i in self.content:
# Fills the attributes dictionary with the values of the tuple
for j in range(len(self.header.attributes)):
attributes[self.header.attributes[j]] = self._autocast(i[j])
try:
#print('in Relation.selection expr = ',expr)
#print('in Relation.selection attr = ',attributes)
if eval(expr, attributes):
newt.content.add(i)
#print('+ ',i)
#else:
#print('- ',i)
except Exception as e:
raise Exception("Failed to evaluate %s\n%s" % (expr, e.__str__()))
return newt
def product(self, other):
'''Cartesian product, attributes must be different to avoid collisions
Doing this operation on relations with colliding attributes will
cause an exception.
It is possible to use rename on attributes and then use the product'''
if (self.__class__ != other.__class__)or(self.header.sharedAttributes(other.header) != 0):
raise Exception('Unable to perform product on relations with colliding attributes')
newt = Relation()
newt.header = Header(self.header.attributes + other.header.attributes)
for i in self.content:
for j in other.content:
newt.content.add(i + j)
return newt
def projection(self, * attributes):
'''Projection operator, takes many parameters, for each field to use.
Can also use a single parameter with a list.
Will delete duplicate items
        Raises an exception if no attributes or invalid attributes are provided'''
        # Parameters may be supplied in a single list instead of as multiple parameters
if isinstance(attributes[0], list):
attributes = attributes[0]
# Avoiding duplicated attributes
attributes1 = []
for i in attributes:
if i not in attributes1:
attributes1.append(i)
attributes = attributes1
ids = self.header.getAttributesId(attributes)
#print('Relation: ATTRS =',attributes,' ids =',ids)
if len(ids) == 0 or len(ids) != len(attributes):
raise Exception('Invalid attributes for projection')
newt = Relation()
# Create the header
h = []
for i in ids:
h.append(self.header.attributes[i])
newt.header = Header(h)
# Create the body
for i in self.content:
row = []
for j in ids:
row.append(i[j])
            newt.content.add(tuple(row))
return newt
def rename(self, params):
        '''Operation rename. Takes a dictionary
        mapping each old attribute name to its new name.
For example if you want to rename a to b, provide {"a":"b"}
'''
result = []
newt = Relation()
newt.header = Header(list(self.header.attributes))
for old, new in params.items():
            if not newt.header.rename(old, new):
raise Exception('Unable to find attribute: %s' % old)
newt.content = self.content
newt._readonly = True
return newt
def intersection(self, other):
'''Intersection operation. The result will contain items present in both
operands.
Will return an empty one if there are no common items.
        Will raise an exception if headers are different.
It is possible to use projection and rename to make headers match.'''
other = self._rearrange_(other) # Rearranges attributes' order
if (self.__class__ != other.__class__)or(self.header != other.header):
raise Exception(
'Unable to perform intersection on relations with different attributes')
newt = Relation()
newt.header = Header(list(self.header.attributes))
        newt.content = self.content.intersection(other.content)
return newt
def difference(self, other):
'''Difference operation. The result will contain items present in first
operand but not in second one.
Will return an empty one if the second is a superset of first.
        Will raise an exception if headers are different.
It is possible to use projection and rename to make headers match.'''
other = self._rearrange_(other) # Rearranges attributes' order
if (self.__class__ != other.__class__)or(self.header != other.header):
raise Exception(
'Unable to perform difference on relations with different attributes')
newt = Relation()
newt.header = Header(list(self.header.attributes))
newt.content = self.content.difference(other.content)
return newt
def division(self, other):
'''Division operator
The division is a binary operation that is written as R ÷ S. The
result consists of the restrictions of tuples in R to the
attribute names unique to R, i.e., in the header of R but not in the
header of S, for which it holds that all their combinations with tuples
in S are present in R.
'''
# d_headers are the headers from self that aren't also headers in other
d_headers = list(set(self.header.attributes) - set(other.header.attributes))
t = self.projection(d_headers).product(other)
return self.projection(d_headers).difference(t.difference(self).projection(d_headers))
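    # Hedged example (editor addition): with R(student, course) listing exam
    # registrations and S(course) listing required courses, R.division(S)
    # yields the students registered for every required course, matching the
    # definition in the docstring above. The relation names are made up.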
def union(self, other):
'''Union operation. The result will contain items present in first
and second operands.
Will return an empty one if both are empty.
        Will not insert duplicated items.
        Will raise an exception if headers are different.
It is possible to use projection and rename to make headers match.'''
other = self._rearrange_(other) # Rearranges attributes' order
if (self.__class__ != other.__class__)or(self.header != other.header):
raise Exception('Unable to perform union on relations with different attributes')
newt = Relation()
newt.header = Header(list(self.header.attributes))
newt.content = self.content.union(other.content)
return newt
def join(self, other, cond=None):
'''Natural join, joins on shared attributes (one or more). If there are no
shared attributes, it will behave as cartesian product.
        If cond is not None a theta-join is performed,
        otherwise a natural join.
        '''
        # provisional solution: the theta-join is performed as a cartesian product
        # followed by a selection (efficiency could be improved)
if cond != None:
return self.product(other).selection(cond)
# List of attributes in common between the relations
shared = list(set(self.header.attributes)
.intersection(set(other.header.attributes)))
newt = Relation() # Creates the new relation
# Adding to the headers all the fields, done like that because order is needed
newt.header = Header(list(self.header.attributes))
for i in other.header.attributes:
if i not in shared:
newt.header.attributes.append(i)
# Shared ids of self
sid = self.header.getAttributesId(shared)
# Shared ids of the other relation
oid = other.header.getAttributesId(shared)
# Non shared ids of the other relation
noid = []
for i in range(len(other.header.attributes)):
if i not in oid:
noid.append(i)
for i in self.content:
for j in other.content:
match = True
for k in range(len(sid)):
match = match and (i[sid[k]] == j[oid[k]])
if match:
item = list(i)
for l in noid:
item.append(j[l])
newt.content.add(tuple(item))
return newt
def outer(self, other):
'''Does a left and a right outer join and returns their union.'''
a = self.outer_right(other)
b = self.outer_left(other)
return a.union(b)
def outer_right(self, other):
'''Outer right join. Considers self as left and param as right. If the
        tuple has no correspondence, empty attributes are filled with a "---"
string. This is due to the fact that empty string or a space would cause
problems when saving the relation.
Just like natural join, it works considering shared attributes.'''
return other.outer_left(self)
#def outer_left(self, other, swap=False): #orig
def outer_left(self, other, swap_bho=False): #lc
'''Outer left join. Considers self as left and param as right. If the
        tuple has no correspondence, empty attributes are filled with a "---"
string. This is due to the fact that empty string or a space would cause
problems when saving the relation.
Just like natural join, it works considering shared attributes.'''
shared = []
for i in self.header.attributes:
if i in other.header.attributes:
shared.append(i)
newt = Relation() # Creates the new relation
# Adds all the attributes of the 1st relation
newt.header = Header(list(self.header.attributes))
# Adds all the attributes of the 2nd, when non shared
for i in other.header.attributes:
if i not in shared:
newt.header.attributes.append(i)
# Shared ids of self
sid = self.header.getAttributesId(shared)
# Shared ids of the other relation
oid = other.header.getAttributesId(shared)
# Non shared ids of the other relation
noid = []
for i in range(len(other.header.attributes)):
if i not in oid:
noid.append(i)
for i in self.content:
            # Did the tuple participate in the join?
added = False
for j in other.content:
match = True
for k in range(len(sid)):
match = match and (i[sid[k]] == j[oid[k]])
if match:
item = list(i)
for l in noid:
item.append(j[l])
newt.content.add(tuple(item))
added = True
            # If it didn't participate, add it
if not added:
item = list(i)
for l in range(len(noid)):
#item.append("---")
item.append(NULL)
newt.content.add(tuple(item))
return newt
    #-------------------- outer joins with condition [+lc] -----------------------
    # left outer theta-join (left outer join with condition)
    # if swap==True the tuples of other are placed at the left side
    # and the tuples of self are placed at the right side
    # (to manage the outer right join)
def left_theta_join(self, other, cond, swap=False):
#print("--------> relation.left_theta_join 1")
attributes = {}
newt = Relation() # empty relation
#print("--------> relation.left_theta_join 2")
if not swap:
newt.header = Header(self.header.attributes + other.header.attributes)
else:
newt.header = Header(other.header.attributes + self.header.attributes)
for i in self.content: # loop on self tuples
added = False
for j in other.content: # loop on other's' tuples
row = i+j if not swap else j+i
# Fills the attributes dictionary with the values of the tuple
                for k in range(len(newt.header.attributes)):
                    attributes[newt.header.attributes[k]] = newt._autocast(row[k])
try:
if eval(cond, attributes):
newt.content.add(row)
added = True
except Exception as e:
raise Exception("Failed to evaluate %s\n%s" % (cond, e.__str__()))
            # If it didn't participate, add it
if not added:
                #newt.content.add(i) # provisional, ok
t1 = list(i)
t2 = []
for l in range(len(other.header.attributes)):
t2.append(NULL)
row = t1+t2 if not swap else t2+t1
newt.content.add(tuple(row))
return newt
def right_theta_join(self, other, cond):
return other.left_theta_join(self,cond,True)
# full outer theta join
def full_theta_join(self, other, cond):
a = self.left_theta_join(other,cond)
b = self.right_theta_join(other,cond)
return a.union(b)
#-----------------------------------------------------------------------------
def __eq__(self, other):
'''Returns true if the relations are the same, ignoring order of items.
This operation is rather heavy, since it requires sorting and comparing.'''
other = self._rearrange_(
other) # Rearranges attributes' order so can compare tuples directly
if (self.__class__ != other.__class__)or(self.header != other.header):
return False # Both parameters must be a relation
if set(self.header.attributes) != set(other.header.attributes):
return False
# comparing content
return self.content == other.content
def __str__(self):
'''Returns a string representation of the relation, can be printed with
monospaced fonts'''
        m_len = [] # Maximum length of each column
for f in self.header.attributes:
m_len.append(len(f))
for f in self.content:
col = 0
for i in f:
if len(i) > m_len[col]:
m_len[col] = len(i)
col += 1
res = ""
for f in range(len(self.header.attributes)):
res += "%s" % (self.header.attributes[f].ljust(2 + m_len[f]))
for r in self.content:
col = 0
res += "\n"
for i in r:
res += "%s" % (i.ljust(2 + m_len[col]))
col += 1
return res
    # convert the relation to csv format
    def csv(self): #lc
        '''
        Convert the relation to csv format:
tabname
attr1,attr2,attr3
v11,v12,v13
v21,v22,v23
'''
res = self.relname+"\n"
for a in self.header.attributes:
res += a+","
        res = res[:len(res)-1] # remove the trailing comma
res += '\n'
for r in self.content:
for i in r:
res += i+','
            res = res[:len(res)-1] # remove the trailing comma
res += '\n'
return res
    # replaces the whole extension of the relation with the values
    # contained in the lines of text (in csv format) [lc]
def replace(self, text):
#print('Relation:replace:\n TEXT=[',text,']')
lines = text.split('\n')
        nl = len(lines)-1 # the last line is empty; why?
        self.content = set() # emptying
for i in range(nl): # Iterating rows
a = lines[i].split(',')
#print('Relation: A =',a)
            self.content.add(tuple(a)) # USE insert to avoid duplicate rows
return nl
def update(self, expr, dic):
'''Update, expr must be a valid boolean expression, can contain field names,
        constants, math operations and boolean ones.
This operation will change the relation itself instead of generating a new one,
updating all the tuples that make expr true.
Dic must be a dictionary that has the form field name:value.
Every kind of value will be converted into a string.
Returns the number of affected rows.'''
self._make_writable()
affected = 0
attributes = {}
keys = list(dic.keys()) # List of headers to modify
f_ids = self.header.getAttributesId(
keys) # List of indexes corresponding to keys
# new_content=[] #New content of the relation
for i in self.content:
for j in range(len(self.header.attributes)):
attributes[self.header.attributes[j]] = self._autocast(i[j])
if eval(expr, attributes): # If expr is true, changing the tuple
affected += 1
new_tuple = list(i)
# Deleting the tuple, instead of changing it, so other
# relations can still point to the same list without
# being affected.
self.content.remove(i)
for k in range(len(keys)):
new_tuple[f_ids[k]] = str(dic[keys[k]])
self.content.add(tuple(new_tuple))
return affected
def insert(self, values):
'''Inserts a tuple in the relation.
This function will not insert duplicate tuples.
All the values will be converted in string.
Will return the number of inserted rows.'''
# Returns if tuple doesn't fit the number of attributes
if len(self.header.attributes) != len(values):
return 0
self._make_writable()
# Creating list containing only strings
t = []
for i in values:
t.append(str(i))
prevlen = len(self.content)
self.content.add(tuple(t))
return len(self.content) - prevlen
def delete(self, expr):
'''Delete, expr must be a valid boolean expression, can contain field names,
        constants, math operations and boolean ones.
This operation will change the relation itself instead of generating a new one,
deleting all the tuples that make expr true.
Returns the number of affected rows.'''
self._make_writable()
attributes = {}
affected = len(self.content)
new_content = set() # New content of the relation
for i in self.content:
for j in range(len(self.header.attributes)):
attributes[self.header.attributes[j]] = self._autocast(i[j])
if not eval(expr, attributes):
affected -= 1
new_content.add(i)
self.content = new_content
return affected
#=========================================================================
class Header (object):
'''This class defines the header of a relation.
It is used within relations to know if requested operations are accepted'''
# Since relations are mutable we explicitly block hashing them
__hash__ = None
def __init__(self, attributes):
'''Accepts a list with attributes' names. Names MUST be unique'''
self.attributes = attributes
for i in attributes:
#if not is_valid_relation_name(i):
#if not Rstring(i).is_valid_relation_name():
if not Exp.is_valid_relation_name(i):
raise Exception('"%s" is not a valid attribute name' % i)
def __repr__(self):
return "header(%s)" % (self.attributes.__repr__())
def rename(self, old, new):
'''Renames a field. Doesn't check if it is a duplicate.
Returns True if the field was renamed, False otherwise'''
if not Exp.is_valid_relation_name(new):
raise Exception('%s is not a valid attribute name' % new)
try:
id_ = self.attributes.index(old)
self.attributes[id_] = new
        except ValueError:
return False
return True
def sharedAttributes(self, other):
'''Returns how many attributes this header has in common with a given one'''
return len(set(self.attributes).intersection(set(other.attributes)))
def __str__(self):
'''Returns String representation of the field's list'''
return self.attributes.__str__()
def __eq__(self, other):
return self.attributes == other.attributes
def __ne__(self, other):
return self.attributes != other.attributes
def getAttributesId(self, param):
'''Returns a list with numeric index corresponding to field's name'''
res = []
for i in param:
for j in range(len(self.attributes)):
if i == self.attributes[j]:
res.append(j)
return res
if __name__ == "__main__":
#import utils
    #from Types import * # for local testing
    # load the test table from file (the constructor expects csv text, not a path)
    r = Relation()
    r.load('dipendente', 'dipendente.csv')
print('R=',r)
r.delete('Cognome>\'G\'')
print('R=',r)
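    # Hedged standalone sketch (editor addition) exercising the csv-text
    # constructor documented above; table and attribute names are made up:
    #
    #   text = 'people\nname,age\nalice,30\nbob,25\n'
    #   people = Relation(text)
    #   print(people.selection('age > 28'))  # keeps only the alice row
    #   print(people.projection('name'))     # the name column, duplicates removed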
| thadumi/radb | bin/core/Relation.py | Python | gpl-3.0 | 27,502 |
import libtcodpy as libtcod
class Doodad(object):
def __init__(self, x, y, colors=None, tileSize=1):
self.x = x
self.y = y
self.character = None
self.tileSize = tileSize
if colors is not None:
self.color = colors[libtcod.random_get_int(0,0, len(colors) - 1)]
def draw(self, con, theMap, blocks, blockSight):
tileSize = None
        if self.tileSize != 1:
            # floor division so range() below always receives an int
            tileSize = self.tileSize // 2
        else:
            tileSize = self.tileSize
for x in range(tileSize):
for y in range(tileSize):
if self.x + x > theMap.x2 - 1 or self.y + y > theMap.y2 - 1:
continue
if libtcod.map_is_in_fov(theMap.fovMap, self.x + x, self.y + y):
libtcod.console_set_default_foreground(con, self.color)
libtcod.console_put_char(con, self.x + x, self.y + y, self.character, libtcod.BKGND_NONE)
class Grass(Doodad):
def __init__(self, x, y, tileSize=1):
colors = [libtcod.light_green, libtcod.lighter_green, libtcod.light_yellow, libtcod.lighter_yellow]
self.deadColor = libtcod.gray
self.tileSize = tileSize
super(Grass, self).__init__(x, y, colors=colors, tileSize=self.tileSize)
self.character = '\''
self.blocks = False
self.blockSight = False
def draw(self, con, fovMap=None):
super(Grass, self).draw(con, fovMap, self.blocks, self.blockSight)
class Tree(Doodad):
def __init__(self, x, y, tileSize=4):
colors = [libtcod.dark_amber, libtcod.dark_green, libtcod.dark_lime]
self.deadColor = libtcod.gray
self.tileSize = tileSize
super(Tree, self).__init__(x, y, colors=colors, tileSize=self.tileSize)
self.blocks = True
self.blockSight = True
self.character = 'O'
def draw(self, con, theMap):
super(Tree, self).draw(con, theMap, self.blocks, self.blockSight)
class Lake(Doodad):
def __init__(self, x, y):
colors = [libtcod.blue]
self.tileSize = libtcod.random_get_int(0, 4, 8)
super(Lake, self).__init__(x, y, colors=colors, tileSize=self.tileSize)
self.blockSight = False
self.blocks = True
self.character = '~'
def draw(self, con, theMap):
super(Lake, self).draw(con, theMap, self.blocks, self.blockSight)
|
sandlst/journey-of-the-necromancer
|
doodad.py
|
Python
|
mit
| 2,395
|
# https://www.hackerrank.com/challenges/nested-list
# Enter your code here. Read input from STDIN. Print output to STDOUT
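# Approach: read N (name, mark) pairs, sort the marks, then walk past any
# duplicates of the lowest mark to find the second-lowest; finally print the
# matching names in alphabetical order. Note this is Python 2 code
# (raw_input, and zip() returning a subscriptable list).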
a = int(raw_input().strip())
data = []
result = []
for i in range(0, a):
name = str(raw_input().strip())
marks = float(raw_input().strip())
data.append([name, marks])
marks = zip(*data)[1]
marks = list(marks)
marks.sort()
def find_second(i):
if marks[i] != marks[i - 1]:
return marks[i]
i += 1
return find_second(i)
small = find_second(1)
for j in data:
if j[1] == small:
result.append(j[0])
result.sort()
for k in result:
print(k)
|
JaguarPaw2409/HackerRank
|
python/nested_list.py
|
Python
|
gpl-3.0
| 604
|
#!/usr/bin/env python3
#
# Copyright 2019 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Allots libraries to modules to be packaged into.
All libraries that are depended on by a single module will be allotted to this
module. All other libraries will be allotted to the closest ancestor.
Example:
Given the module dependency structure
c
/ \
b d
/ \
a e
and libraries assignment
a: ['lib1.so']
e: ['lib2.so', 'lib1.so']
will make the allotment decision
c: ['lib1.so']
e: ['lib2.so']
The above example is invoked via:
./allot_native_libraries \
--libraries 'a,["1.so"]' \
--libraries 'e,["2.so", "1.so"]' \
--dep c:b \
--dep b:a \
--dep c:d \
--dep d:e \
--output <output JSON>
"""
import argparse
import collections
import json
import sys
from util import build_utils
def _ModuleLibrariesPair(arg):
pos = arg.find(',')
assert pos > 0
return (arg[:pos], arg[pos + 1:])
def _DepPair(arg):
parent, child = arg.split(':')
return (parent, child)
def _PathFromRoot(module_tree, module):
"""Computes path from root to a module.
Parameters:
module_tree: Dictionary mapping each module to its parent.
module: Module to which to compute the path.
Returns:
    Path from root to the module.
"""
path = [module]
while module_tree.get(module):
module = module_tree[module]
path = [module] + path
return path
def _ClosestCommonAncestor(module_tree, modules):
"""Computes the common ancestor of a set of modules.
Parameters:
module_tree: Dictionary mapping each module to its parent.
modules: Set of modules for which to find the closest common ancestor.
Returns:
The closest common ancestor.
"""
paths = [_PathFromRoot(module_tree, m) for m in modules]
assert len(paths) > 0
ancestor = None
for level in zip(*paths):
if len(set(level)) != 1:
return ancestor
ancestor = level[0]
return ancestor
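# Illustrative check of the two helpers above, using the module tree from the
# module docstring written as a parent map (not part of the build invocation):
#
#   tree = {'c': None, 'b': 'c', 'd': 'c', 'a': 'b', 'e': 'd'}
#   _PathFromRoot(tree, 'a')                  # -> ['c', 'b', 'a']
#   _ClosestCommonAncestor(tree, {'a', 'e'})  # -> 'c'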
def _AllotLibraries(module_tree, libraries_map):
"""Allot all libraries to a module.
Parameters:
module_tree: Dictionary mapping each module to its parent. Modules can map
to None, which is considered the root of the tree.
libraries_map: Dictionary mapping each library to a set of modules, which
depend on the library.
Returns:
    A dictionary mapping each module name to a set of libraries allotted
to the module such that libraries with multiple dependees are allotted to
the closest ancestor.
Raises:
Exception if some libraries can only be allotted to the None root.
"""
allotment_map = collections.defaultdict(set)
for library, modules in libraries_map.items():
ancestor = _ClosestCommonAncestor(module_tree, modules)
if not ancestor:
raise Exception('Cannot allot libraries for given dependency tree')
allotment_map[ancestor].add(library)
return allotment_map
def main(args):
parser = argparse.ArgumentParser()
parser.add_argument(
'--libraries',
action='append',
type=_ModuleLibrariesPair,
required=True,
help='A pair of module name and GN list of libraries a module depends '
'on. Can be specified multiple times.')
parser.add_argument(
'--output',
required=True,
help='A JSON file with a key for each module mapping to a list of '
'libraries, which should be packaged into this module.')
parser.add_argument(
'--dep',
action='append',
type=_DepPair,
dest='deps',
default=[],
help='A pair of parent module name and child module name '
'(format: "<parent>:<child>"). Can be specified multiple times.')
options = parser.parse_args(build_utils.ExpandFileArgs(args))
options.libraries = [(m, build_utils.ParseGnList(l))
for m, l in options.libraries]
# Parse input creating libraries and dependency tree.
libraries_map = collections.defaultdict(set) # Maps each library to its
# dependee modules.
module_tree = {} # Maps each module name to its parent.
for module, libraries in options.libraries:
module_tree[module] = None
for library in libraries:
libraries_map[library].add(module)
for parent, child in options.deps:
if module_tree.get(child):
raise Exception('%s cannot have multiple parents' % child)
module_tree[child] = parent
module_tree[parent] = module_tree.get(parent)
# Allot all libraries to a module such that libraries with multiple dependees
# are allotted to the closest ancestor.
allotment_map = _AllotLibraries(module_tree, libraries_map)
# The build system expects there to be a set of libraries even for the modules
# that don't have any libraries allotted.
for module in module_tree:
# Creates missing sets because of defaultdict.
allotment_map[module] = allotment_map[module]
with open(options.output, 'w') as f:
# Write native libraries config and ensure the output is deterministic.
json.dump({m: sorted(l)
for m, l in allotment_map.items()},
f,
sort_keys=True,
indent=2)
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
|
ric2b/Vivaldi-browser
|
chromium/build/android/gyp/allot_native_libraries.py
|
Python
|
bsd-3-clause
| 5,364
|
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import android_commands
import constants
import logging
import os
import subprocess
import time
class FakeDns(object):
"""Wrapper class for the fake_dns tool."""
_FAKE_DNS_PATH = constants.TEST_EXECUTABLE_DIR + '/fake_dns'
def __init__(self, adb, build_type):
"""
Args:
adb: the AndroidCommands to use.
build_type: 'Release' or 'Debug'.
"""
self._adb = adb
self._build_type = build_type
self._fake_dns = None
self._original_dns = None
def _PushAndStartFakeDns(self):
"""Starts the fake_dns server that replies all name queries 127.0.0.1.
Returns:
subprocess instance connected to the fake_dns process on the device.
"""
self._adb.PushIfNeeded(
os.path.join(constants.CHROME_DIR, 'out', self._build_type, 'fake_dns'),
FakeDns._FAKE_DNS_PATH)
return subprocess.Popen(
['adb', '-s', self._adb._adb.GetSerialNumber(),
'shell', '%s -D' % FakeDns._FAKE_DNS_PATH])
def SetUp(self):
"""Configures the system to point to a DNS server that replies 127.0.0.1.
This can be used in combination with the forwarder to forward all web
traffic to a replay server.
The TearDown() method will perform all cleanup.
"""
self._adb.RunShellCommand('ip route add 8.8.8.0/24 via 127.0.0.1 dev lo')
self._fake_dns = self._PushAndStartFakeDns()
self._original_dns = self._adb.RunShellCommand('getprop net.dns1')[0]
self._adb.RunShellCommand('setprop net.dns1 127.0.0.1')
time.sleep(2) # Time for server to start and the setprop to take effect.
def TearDown(self):
"""Shuts down the fake_dns."""
if self._fake_dns:
if not self._original_dns or self._original_dns == '127.0.0.1':
logging.warning('Bad original DNS, falling back to Google DNS.')
self._original_dns = '8.8.8.8'
self._adb.RunShellCommand('setprop net.dns1 %s' % self._original_dns)
self._fake_dns.kill()
self._adb.RunShellCommand('ip route del 8.8.8.0/24 via 127.0.0.1 dev lo')
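# Hedged usage sketch (adb and build_type come from the surrounding test
# harness; FakeDns is not a context manager, so pair SetUp with TearDown):
#
#   fake_dns = FakeDns(adb, 'Release')
#   fake_dns.SetUp()
#   try:
#       pass  # run tests; all DNS lookups now resolve to 127.0.0.1
#   finally:
#       fake_dns.TearDown()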
|
matsumoto-r/synciga
|
src/build/android/pylib/fake_dns.py
|
Python
|
bsd-3-clause
| 2,195
|
__author__ = 'Nicklas Boerjesson'
import unittest
from os import listdir, remove, rmdir
from lib.smbutils import smb_connect
from service.lib.synctools import copy_files, walk_local, walk_smb
local_source_files = ['resources/source/test_root.txt', 'resources/source/l1/l2_1/l2_1.txt']
local_destination_files = ['resources/destination/test_root.txt', 'resources/destination/l1/l2_1/l2_1.txt']
smb_destination_files = ['test/destination/test_root.txt', 'test/destination/l1/l2_1/l2_1.txt']
def get_connection():
return smb_connect('fs01', 'tester', 'test')
def clear_smb(_conn=None):
try:
if _conn is None:
_conn = get_connection()
_conn.deleteFiles("test","destination/test_root.txt")
_conn.deleteFiles("test","destination/l1/l2_1/l2_1.txt")
_conn.deleteDirectory("test","destination/l1/l2_1")
_conn.deleteDirectory("test","destination/l1")
_conn.close()
    except Exception:
        pass  # best-effort cleanup: the files/directories may not exist yet
def clear_local():
try:
remove("resources/destination/test_root.txt")
remove("resources/destination/l1/l2_1/l2_1.txt")
rmdir("resources/destination//l1/l2_1")
rmdir("resources/destination//l1")
except:
pass
class TestSyncTools(unittest.TestCase):
def _on_progress(self, _subject, _body):
print("_subject:" + _subject + " _body:" + _body)
def test_copy_files_local(self):
clear_local()
copy_files(_source_paths=local_source_files, _destination_paths=local_destination_files, _context = 'localtolocal', _on_progress=self._on_progress)
self.assertEqual(listdir('resources/destination'), ['l1', 'test_root.txt'], "Files not matching expected result")
clear_local()
def test_copy_files_local_to_SMB_and_back(self):
"""To run this test you need a SMB server called FS01 with the appropriate shares, see get_connection()"""
_connection = get_connection()
clear_smb(_connection)
copy_files(_source_paths=local_source_files, _destination_paths=smb_destination_files, _context = 'localtosmb',_smb_connection=_connection, _on_progress=self._on_progress)
#self.assertEqual(listdir('resources/destination'),['test2.txt', 'test1.txt'], "Files not matching expected result")
clear_local()
copy_files(_source_paths=smb_destination_files, _destination_paths=local_destination_files, _context = 'smbtolocal',_smb_connection=_connection, _on_progress=self._on_progress)
clear_smb(_connection)
_connection.close()
print(str([x[0:3] for x in walk_local('resources/destination')]))
self.assertEqual(
[
['resources/destination/l1', True, 0],
['resources/destination/test_root.txt', False, 0],
['resources/destination/l1/l2_1', True, 0],
['resources/destination/l1/l2_1/l2_1.txt', False, 9]
],
[x[0:3] for x in walk_local('resources/destination')],
"Files not matching expected result")
clear_local()
def test_walk_smb(self):
_connection = get_connection()
#print(str(walk_smb("test/l1", _connection)))
self.assertEqual(
[[u'test/l1/l2_1/l2_1.txt', False, 9],
[u'test/l1/l2_1', True, 0],
[u'test/l1/l2_2/l3_2_1', True, 0],
[u'test/l1/l2_2', True, 0],
[u'test/l1/L2_3/L2_3_1/L2_3_1.txt', False, 11],
[u'test/l1/L2_3/L2_3_1', True, 0],
[u'test/l1/L2_3', True, 0]
],
[x[0:3] for x in walk_smb("test/l1", _connection)],
"Directory structures differ"
)
def test_walk_local(self):
_sorted = sorted([x[0:3] for x in walk_local("resources/source")], key=lambda i:i[0])
print(str(_sorted))
self.assertEqual(
[
['resources/source/l1', True, 0],
['resources/source/l1/L2_3', True, 0],
['resources/source/l1/L2_3/L2_3_1', True, 0],
['resources/source/l1/L2_3/L2_3_1/L2_3_1.txt', False, 11],
['resources/source/l1/l2_1', True, 0],
['resources/source/l1/l2_1/l2_1.txt', False, 9],
['resources/source/l1/l2_2', True, 0],
['resources/source/l1/l2_2/l3_2_1', True, 0],
['resources/source/test_old.txt', False, 12],
['resources/source/test_root.txt', False, 0]
]
,
_sorted,
"Directory structures differ"
)
if __name__ == '__main__':
unittest.main()
|
OptimalBPM/optimal_file_sync
|
service/lib/tests/test_synctools.py
|
Python
|
apache-2.0
| 4,494
|
import subprocess
import re
import os
import logging
#TODO:
# add API to set credentials (Ammon Larsen)
logger = logging.getLogger('gitclient')
logger.setLevel(logging.DEBUG)
logger_handler_console = logging.StreamHandler()
logger_handler_console.setLevel(logging.DEBUG)
logger_formatter = logging.Formatter('%(asctime)s [%(name)s] [%(levelname)s] %(message)s')
logger_handler_console.setFormatter(logger_formatter)
logger.addHandler(logger_handler_console)
class command:
output = ''
returncode = 0
def __str__(self):
return 'returncode=%d\r\noutput=%s' %(self.returncode, self.output)
@staticmethod
def execute(cmd):
result = command()
try:
result.output = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
result.returncode = 0
except subprocess.CalledProcessError as e:
result.output = e.output
result.returncode = e.returncode
return result
class GitRemote:
name = None
url = None
type = None
@staticmethod
def parse(cmdres):
result = []
lines = cmdres.output.split(b'\n')
for line in lines:
if line == b'':
                continue
tokens = []
for token in line.split(b' '):
sub_tokens = token.split(b'\t')
if len(sub_tokens) > 0:
for sub_token in sub_tokens:
tokens.append(sub_token)
else:
tokens.append(token)
item = GitRemote()
item.name = tokens[0]
item.url = tokens[1]
item.type = tokens[2]
result.append(item)
return result
class GitFileChangeDescription:
change_type = None
file = ''
def __str__(self):
if self.change_type != None:
string = '%s (%s)' %(self.file.decode(), self.change_type.decode())
else:
string = '%s' %(self.file.decode())
return string
@staticmethod
def parse(line):
result = GitFileChangeDescription()
line = line.replace(b'(new commits)', b'')
line = line.strip()
tokens = line.split(b':')
if len(tokens) == 1:
result.file = tokens[0]
else:
result.change_type = tokens[0]
result.file = tokens[1].strip()
return result
class GitLog:
commit = None
author = None
date = None
description = None
merge = None
def __str__(self):
string = 'commit %s\r\nAuthor: %s\r\nDate: %s' %(self.commit.decode(), self.author.decode(), self.date.decode())
return string
@staticmethod
def parse(cmdres):
result = []
item = None
for line in cmdres.output.split(b'\n'):
if line.startswith(b'commit'):
item = GitLog()
item.commit = line.split(b' ')[1]
result.append(item)
elif line.startswith(b'Merge:'):
item.merge = line[7:]
elif line.startswith(b'Author:'):
item.author = line[8:]
elif line.startswith(b'Date:'):
item.date = line[8:]
elif line == b'':
continue
else:
if item.description == None:
item.description = line
else:
item.description += b'\n' + line
return result
class GitTag:
@staticmethod
def parse(cmdres):
result = cmdres.output.split(b'\n')
return result
class GitStatus:
branch = ''
staged = None
not_staged = None
untracked = None
def __str__(self):
string = 'branch: %s' %(str(self.branch))
if len(self.staged) > 0:
string = string + '\r\nStaged:'
for file in self.staged:
string = string + '\r\n\t' + str(file)
if len(self.not_staged) > 0:
string = string + '\r\nNot staged:'
for file in self.not_staged:
string = string + '\r\n\t' + str(file)
if len(self.untracked) > 0:
string = string + '\r\nUntracked:'
for file in self.untracked:
string = string + '\r\n\t' + str(file)
return string
@staticmethod
def parse(cmdres):
result = None
if cmdres != None:
result = GitStatus()
result.staged = []
result.not_staged = []
result.untracked = []
is_reading_staged = False
is_reading_not_staged = False
is_reading_untracked = False
for line in cmdres.output.split(b'\n'):
if b'(use' in line or line == b'':
continue
elif b'Changes to be committed' in line:
is_reading_staged = True
is_reading_not_staged = False
is_reading_untracked = False
elif b'Changes not staged for commit' in line:
is_reading_staged = False
is_reading_not_staged = True
is_reading_untracked = False
elif b'Untracked files' in line:
is_reading_staged = False
is_reading_not_staged = False
is_reading_untracked = True
elif is_reading_staged == True:
file = GitFileChangeDescription.parse(line)
if file != None:
result.staged.append(file)
elif is_reading_not_staged == True:
file = GitFileChangeDescription.parse(line)
if file != None:
result.not_staged.append(file)
elif is_reading_untracked == True:
file = GitFileChangeDescription.parse(line)
if file != None:
result.untracked.append(file)
elif b'On branch' in line:
result.branch = line.split(b' ')[2]
elif b'HEAD detached at' in line:
result.branch = line.split(b' ')[3]
return result
class GitResetMode:
Mixed = 0
Soft = 1
Hard = 2
Merged = 3
Keep = 4
class GitSubmoduleStatus:
is_current_commit_checked_out = True
is_initialized = True
has_merge_conflicts = False
current_commit_id_checked_out = None
path = None
def __str__(self):
attributes = []
if not self.is_current_commit_checked_out:
attributes.append('+')
if self.has_merge_conflicts:
attributes.append('U')
if not self.is_initialized:
attributes.append('-')
if len(attributes) > 0:
attributes = ",".join(attributes)
attributes = '(' + attributes + ')'
string = '%s %s %s' %(self.current_commit_id_checked_out.decode(), self.path.decode(), attributes)
else:
string = '%s %s' %(self.current_commit_id_checked_out.decode(), self.path.decode())
return string
@staticmethod
def parse(cmdres):
result = None
if cmdres.returncode == 0:
result = []
for line in cmdres.output.split(b'\n'):
if line == b'':
continue
item = GitSubmoduleStatus()
if line[0:1] == b'U':
item.has_merge_conflicts = True
elif line[0:1] == b'-':
item.is_initialized = False
elif line[0:1] == b'+':
item.is_current_commit_checked_out = False
item.current_commit_id_checked_out = line[1:41]
item.path = line[42:line.index(b' (')]
result.append(item)
return result
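# Example of the 'git submodule status' line format parse() above expects
# (one-char state prefix, 40-char commit id, path, describe suffix; the
# values are illustrative):
#
#   +a94a8fe5ccb19ba61c4c0873d391e987982fbbd3 third_party/lib (v1.2-3-ga94a8fe)
#
# The leading '+' maps to is_current_commit_checked_out = False, the hash to
# current_commit_id_checked_out, and 'third_party/lib' to path.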
class GitClient:
@staticmethod
def clone(path='.', url='', recursive=False):
        result = None
        if url == '':
            logger.error("cannot clone repo (invalid url)")
        else:
            try:
                os.chdir(path)
                full_cmd = "git clone %s" % (url)
                if recursive:
                    # honor the 'recursive' parameter, which was previously ignored
                    full_cmd += " --recursive"
                logger.info(full_cmd)
                cmd = command.execute(full_cmd)
                if cmd.returncode != 0:
                    logger.error("git clone returned %s, code=%d", cmd.output, cmd.returncode)
                else:
                    result = GitClient()
            except OSError:
                logger.error("Failed changing directory to %s", path)
        return result
@staticmethod
def open(path='.'):
result = None
try:
logger.info('Opening git repo %s' %(path))
if path != '.':
os.chdir(path)
cmd = command.execute("git status")
if cmd.output.find(b'Not a git repository') != -1:
logger.error('Not a git repository')
result = None
else:
result = GitClient()
except:
logger.error("Failed opening repository")
result = None
return result
def status(self):
        result = None
full_cmd = "git status"
logger.info(full_cmd)
cmd = command.execute(full_cmd)
if cmd.returncode != 0:
logger.error("git status returned %s, code=%d", cmd.output, cmd.returncode)
result = None
else:
result = GitStatus.parse(cmd)
return result
def submodule(self, subcmd = 'status', recursive=False, init=False, deinit=False):
        result = None
full_cmd = "git submodule %s" %(subcmd)
if recursive:
full_cmd = full_cmd + " --recursive"
if init:
full_cmd = full_cmd + " --init"
if deinit:
full_cmd = full_cmd + " --deinit"
logger.info(full_cmd)
cmd = command.execute(full_cmd)
if cmd.returncode != 0:
logger.error("git %s returned %s, code=%d", subcmd, cmd.output, cmd.returncode)
result = None
else:
if subcmd == 'status':
result = GitSubmoduleStatus.parse(cmd)
else:
result = cmd.returncode
return result
def checkout(self, target='', create_branch=False):
result = None
        if not target:
logger.error("Cannot checkout, target not provided")
else:
full_cmd = "git checkout"
if create_branch:
full_cmd = full_cmd + " -b"
full_cmd = full_cmd + (" %s" %(target))
logger.info(full_cmd)
cmd = command.execute(full_cmd)
if cmd.returncode != 0:
logger.error("git checkout returned %s, code=%d", cmd.output, cmd.returncode)
else:
result = cmd.returncode
return result
def add(self, target=''):
result = None
        if not target:
logger.error("Cannot add, target not provided")
else:
full_cmd = "git add %s" %(target)
logger.info(full_cmd)
cmd = command.execute(full_cmd)
if cmd.returncode != 0:
logger.error("git add returned %s, code=%d", cmd.output, cmd.returncode)
else:
result = cmd.returncode
return result
def rm(self, target=''):
result = None
        if not target:
logger.error("Cannot rm, target not provided")
else:
full_cmd = "git rm %s" %(target)
logger.info(full_cmd)
cmd = command.execute(full_cmd)
if cmd.returncode != 0:
logger.error("git rm returned %s, code=%d", cmd.output, cmd.returncode)
else:
result = cmd.returncode
return result
def commit(self, message='', amend=False):
result = None
        if not message and not amend:
logger.error("Cannot commit, message not provided")
else:
full_cmd = "git commit"
if amend:
full_cmd = full_cmd + " --amend --no-edit"
else:
full_cmd = full_cmd + (" -m \"%s\"" %(message))
logger.info(full_cmd)
cmd = command.execute(full_cmd)
if cmd.returncode != 0:
logger.error("git commit returned %s, code=%d", cmd.output, cmd.returncode)
else:
result = cmd.returncode
return result
def log(self, n=1, author=None, branch=None, path=None):
result = None
if n <= 0:
logger.error("Cannot query log, n is less or equal zero")
else:
full_cmd = "git log -n %d" %(n)
if author != None:
full_cmd = full_cmd + (" --author %s" %(author))
if branch != None:
full_cmd = full_cmd + (" -b %s" %(branch))
if path != None:
full_cmd = full_cmd + (" -- %s" %(path))
logger.info(full_cmd)
cmd = command.execute(full_cmd)
if cmd.returncode != 0:
logger.error("git log returned %s, code=%d", cmd.output, cmd.returncode)
else:
result = GitLog.parse(cmd)
return result
def pull(self, repo='origin', refspec=None):
result = None
full_cmd = "git pull %s" %(repo)
if refspec != None:
full_cmd = full_cmd + (" %s" %(refspec))
logger.info(full_cmd)
cmd = command.execute(full_cmd)
if cmd.returncode != 0:
logger.error("git pull returned %s, code=%d", cmd.output, cmd.returncode)
result = cmd.returncode
return result
def push(self, repo='origin', refspec=None, set_upstream=False, force=False, tags=False):
result = None
full_cmd = "git push"
if set_upstream:
full_cmd = full_cmd + " --set-upstream"
if force:
full_cmd = full_cmd + " --force"
if tags:
full_cmd = full_cmd + " --tags"
full_cmd = full_cmd + (" %s" %(repo))
if refspec != None:
full_cmd = full_cmd + (" %s" %(refspec))
logger.info(full_cmd)
cmd = command.execute(full_cmd)
if cmd.returncode != 0:
logger.error("git push returned: %s", str(cmd))
result = cmd.returncode
return result
def branch(self, branch=None, set_upstream_to=None, set_upstream=False, unset_upstream=False, rename_to=None, delete=False):
result = None
full_cmd = "git branch"
if set_upstream_to != None:
            if set_upstream_to == '':
full_cmd = None
logger.error("git branch failed: set_upstream_to is invalid")
else:
full_cmd = full_cmd + (" --set-upstream-to=%s" %(set_upstream_to))
if branch != None:
full_cmd = full_cmd + (" %s" %(branch))
elif unset_upstream:
full_cmd = full_cmd + " --unset-upstream"
if branch != None:
full_cmd = full_cmd + (" %s" %(branch))
elif rename_to != None:
            if rename_to == '':
full_cmd = None
logger.error("git branch failed: rename_to value is invalid")
else:
full_cmd = full_cmd + (" -m %s" %(rename_to))
elif delete:
            if branch is None or branch == '':
full_cmd = None
logger.error("git branch delete failed: branch value is invalid")
else:
full_cmd = full_cmd + (" -D" %(branch))
else:
if branch == None:
full_cmd = None
logger.error("git branch failed: branch value is not provided")
            elif branch == '':
full_cmd = None
logger.error("git branch failed: branch value is invalid")
else:
full_cmd = full_cmd + (" %s" %(branch))
if full_cmd != None:
logger.info(full_cmd)
cmd = command.execute(full_cmd)
if cmd.returncode != 0:
logger.error("git branch returned: %s", str(cmd))
result = cmd.returncode
return result
def reset(self, commit=None, mode=GitResetMode.Mixed):
result = None
full_cmd = "git reset"
if mode == GitResetMode.Mixed:
full_cmd = full_cmd + " --mixed"
elif mode == GitResetMode.Soft:
full_cmd = full_cmd + " --soft"
elif mode == GitResetMode.Hard:
full_cmd = full_cmd + " --hard"
elif mode == GitResetMode.Merged:
full_cmd = full_cmd + " --merged"
elif mode == GitResetMode.Keep:
full_cmd = full_cmd + " --keep"
else:
full_cmd = None
logger.error("git reset failed: mode value is invalid")
if full_cmd != None:
if commit != None:
full_cmd = full_cmd + (" %s" %(commit))
logger.info(full_cmd)
cmd = command.execute(full_cmd)
if cmd.returncode != 0:
logger.error("git reset returned: %s", str(cmd))
result = cmd.returncode
return result
def remote(self, name=None, url=None, branch=None, prune=False, add=False, remove=False):
result = None
needs_parsing = False
full_cmd = "git remote"
if add:
if name == None or url == None:
logger.error("git remote failed: name and/or url value is invalid")
full_cmd = None
            else:
                full_cmd += " add"  # the 'add' subcommand was missing, yielding an invalid command
                if branch != None:
                    full_cmd += " -t %s" %(branch)
                full_cmd += " %s %s" %(name, url)
elif remove:
if name == None:
logger.error("git remote failed: name value is invalid")
full_cmd = None
else:
full_cmd += " remove %s" %(name)
else:
full_cmd += " -v"
needs_parsing = True
if full_cmd != None:
logger.info(full_cmd)
cmd = command.execute(full_cmd)
if cmd.returncode != 0:
logger.error("git remote returned: %s", str(cmd))
result = cmd.returncode
elif needs_parsing:
result = GitRemote.parse(cmd)
else:
result = cmd.returncode
return result
def tag(self, tag=None, message=None, commit=None, annotate=False, add=False, delete=False):
result = None
parse_result = True
full_cmd = "git tag"
if add == True:
if tag == None:
full_cmd = None
logger.error("git tag failed: tag value is invalid")
else:
if annotate == True:
full_cmd += " -a"
full_cmd += (" %s" %(tag))
if commit != None:
full_cmd += (" %s" %(commit))
if message != None:
full_cmd += (" -m \"%s\"" %(message))
parse_result = False
elif delete == True:
if tag == None:
full_cmd = None
logger.error("git tag failed: tag value is invalid")
else:
full_cmd += (" -d %s" %(tag))
parse_result = False
else:
full_cmd += " --list"
if full_cmd != None:
logger.info(full_cmd)
cmd = command.execute(full_cmd)
if cmd.returncode != 0:
logger.error("git tag returned: %s", str(cmd))
elif parse_result:
result = GitTag.parse(cmd)
else:
result = cmd.returncode
return result
def merge(self):
result = None
return result
def mv(self):
result = None
return result
def init(self):
result = None
return result
def setCredentials(self):
result = None
return result
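# Hedged usage sketch ('/path/to/repo' is a placeholder; open() returns None
# when the path is not a git repository, so check before use):
#
#   repo = GitClient.open('/path/to/repo')
#   if repo is not None:
#       print(repo.status())              # parsed GitStatus, or None on failure
#       repo.add('README.md')
#       repo.commit(message='update readme')
#       repo.push(repo='origin', refspec='master')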
|
ewertons/gitclient
|
gitclient.py
|
Python
|
mit
| 16,102
|
#!/usr/bin/python3
def ask(questionText, answerTypeAllowed = 0, allowedAnswers = None, acceptedAnswerText = "Answer accepted.", typeErrorText = "Your answer was of the wrong type.", badAnswerText = "Your answer was not one of the allowed answers.", genericErrorText = "An error occurred.", defaultAnswer = None):
"""
Ask a question on the command line.
answerTypeAllowed : What type of answers to allow. Defaults to any type.
questionText : What question the user is asked. This is required.
allowedAnswers : What values are allowed. Defaults to any.
acceptedAnswerText: What to say when the answer is accepted. `None` to supress printing.
typeErrorText : The text printed if the input was of the wrong type.
badAnswerText : What is printed when the answer is not one of the specified allowed ones.
genericErrorText : What happens when an error occurs.
defaultAnswer : If the user does not enter anything, what's the default answer. None to ignore.
answerTypeAllowed Values:
0 : Any type.
1 : String
2 : Integer
3 : Float
4 : Boolean
"""
# TODO: Simplify more.
def badAnswer():
print(badAnswerText)
input("Press enter to continue. ")
if answerTypeAllowed not in list(range(5)):
print("There was an error with internal coding :(")
print("Please tell the maintainer this error:")
print("answerTypeAllowed out of bounds when ask_question function called.")
print("answerTypeAllowed was "+str(answerTypeAllowed)+" when allowed values are 0-4.")
return
    while True:
userInput = input(questionText + " ")
if (not userInput) and (defaultAnswer):
if acceptedAnswerText: print(acceptedAnswerText)
return defaultAnswer
if allowedAnswers is not None:
if answerTypeAllowed == 0:
if userInput in allowedAnswers:
if acceptedAnswerText: print(acceptedAnswerText)
return userInput
else:
badAnswer()
continue
elif answerTypeAllowed == 1:
if str(userInput) in allowedAnswers:
if acceptedAnswerText: print(acceptedAnswerText)
return str(userInput)
else:
badAnswer()
continue
elif answerTypeAllowed == 2:
try:
int(userInput)
except ValueError:
print(typeErrorText)
input("Press enter to continue. ")
continue
if int(userInput) in allowedAnswers:
if acceptedAnswerText: print(acceptedAnswerText)
return int(userInput)
else:
badAnswer()
continue
elif answerTypeAllowed == 3:
try:
float(userInput)
except ValueError:
print(typeErrorText)
input("Press enter to continue. ")
continue
if float(userInput) in allowedAnswers:
if acceptedAnswerText: print(acceptedAnswerText)
return float(userInput)
else:
badAnswer()
continue
elif answerTypeAllowed == 4:
if str(userInput).lower() in ['y', 'n', 'yes', 'no', '0', '1', 'true', 'false', 'affirmative', 'positive', 'negative', 'affirmation', 'affirmed']:
if str(userInput).lower() in ['y', 'yes', '1', 'true', 'positive', 'affirmative', 'affirmation', 'affirmed']:
if acceptedAnswerText: print(acceptedAnswerText)
return True
else:
if acceptedAnswerText: print(acceptedAnswerText)
return False
else:
print(typeErrorText)
input("Press enter to continue. ")
continue
else:
print(genericErrorText)
input("Press enter to continue. ")
continue
elif allowedAnswers is None:
if answerTypeAllowed == 0:
if acceptedAnswerText: print(acceptedAnswerText)
return userInput
elif answerTypeAllowed == 1:
if acceptedAnswerText: print(acceptedAnswerText)
return str(userInput)
elif answerTypeAllowed == 2:
try:
int(userInput)
except ValueError:
print(typeErrorText)
input("Press enter to continue. ")
continue
if acceptedAnswerText: print(acceptedAnswerText)
return int(userInput)
elif answerTypeAllowed == 3:
try:
float(userInput)
except ValueError:
print(typeErrorText)
input("Press enter to continue. ")
continue
if acceptedAnswerText: print(acceptedAnswerText)
return float(userInput)
elif answerTypeAllowed == 4:
if str(userInput).lower() in ['y', 'n', 'yes', 'no', '0', '1', 'true', 'false']:
if str(userInput).lower() in ['y', 'yes', '1', 'true']:
if acceptedAnswerText: print(acceptedAnswerText)
return True
else:
if acceptedAnswerText: print(acceptedAnswerText)
return False
else:
print(typeErrorText)
input("Press enter to continue. ")
continue
else:
print(genericErrorText)
input("Press enter to continue. ")
continue
else:
print(genericErrorText)
input("Press enter to continue. ")
continue
if __name__ == "__main__":
print("askquestion.py Version 1.0")
result = ask("Do you wish to see some demos of askquestion.py (Y/n):", answerTypeAllowed = 4, allowedAnswers = None, acceptedAnswerText = None, defaultAnswer = True)
if result:
print("You just saw one! askquestion.py supports many different types of questions.")
        while True:
result = ask("""(1) Anything
(2) String (basically above, just always returns as string)
(3) Integer
(4) Float
(5) Boolean
Type the corresponding number to try one or zero to cancel (0-5):""", answerTypeAllowed = 2, allowedAnswers = list(range(0, 6)), acceptedAnswerText = None)
if result == 0:
break
elif result == 1:
result = ask("Enter anything:", answerTypeAllowed = 0, acceptedAnswerText = None)
print("You entered "+str(result)+"!")
elif result == 2:
result = ask("Enter 'foo', 'bar' or 'derp':", answerTypeAllowed = 1, allowedAnswers = ["foo", "bar", "derp"], acceptedAnswerText = None)
print("You entered "+result+"!")
elif result == 3:
result = ask("Enter a number from one to ten (1-10):", answerTypeAllowed = 2, allowedAnswers = list(range(10)), acceptedAnswerText = None)
print("You entered "+str(result)+"!")
elif result == 4:
result = ask("Please enter a float:", answerTypeAllowed = 3, acceptedAnswerText = None)
print("You entered "+str(result)+"!")
else:
result = ask("Please enter a boolean (Y/n):", answerTypeAllowed = 4, acceptedAnswerText = None, defaultAnswer = True)
print("You entered "+str(result)+"!")
print("Thanks for using askquestion.py! Exiting.")
|
bearbin/box-server
|
askquestion.py
|
Python
|
mit
| 8,008
|
"""
Use the 'Dummy' auth provider for generic integration tests of third_party_auth.
"""
import unittest
from third_party_auth.tests import testutil
from .base import IntegrationTestMixin
@unittest.skipUnless(testutil.AUTH_FEATURE_ENABLED, 'third_party_auth not enabled')
class GenericIntegrationTest(IntegrationTestMixin, testutil.TestCase):
"""
Basic integration tests of third_party_auth using Dummy provider
"""
PROVIDER_ID = "oa2-dummy"
PROVIDER_NAME = "Dummy"
PROVIDER_BACKEND = "dummy"
USER_EMAIL = "adama@fleet.colonies.gov"
USER_NAME = "William Adama"
USER_USERNAME = "Galactica1"
def setUp(self):
super(GenericIntegrationTest, self).setUp()
self.configure_dummy_provider(enabled=True)
def do_provider_login(self, provider_redirect_url):
"""
Mock logging in to the Dummy provider
"""
# For the Dummy provider, the provider redirect URL is self.complete_url
self.assertEqual(provider_redirect_url, self.url_prefix + self.complete_url)
return self.client.get(provider_redirect_url)
|
defance/edx-platform
|
common/djangoapps/third_party_auth/tests/specs/test_generic.py
|
Python
|
agpl-3.0
| 1,103
|
#! /usr/bin/env python3
import http.server
import os
import shutil
import subprocess
import sys
import tempfile
import threading
import unittest
@unittest.skipUnless(sys.platform.startswith("win"), "requires Windows")
class WrapperScriptTests(unittest.TestCase):
http_port = 8080
default_download_url = "http://localhost:" + str(http_port) + "/test/testapp.jar"
def setUp(self):
self.start_server()
self.cache_dir = tempfile.mkdtemp()
def tearDown(self):
self.stop_server()
shutil.rmtree(self.cache_dir)
def test_first_run(self):
result = self.run_script(["arg1", "arg 2"])
output = result.stdout
self.assertIn("Downloading batect", output)
self.assertIn("BATECT_WRAPPER_SCRIPT_DIR is: {}\\\n".format(self.get_script_dir()), output)
self.assertIn("HOSTNAME is: {}\n".format(os.environ['COMPUTERNAME']), output)
self.assertIn("I received 2 arguments.\narg1\narg 2\n", output)
self.assertNotIn("WARNING: you should never see this", output)
self.assertEqual(result.returncode, 0)
def test_second_run(self):
first_result = self.run_script(["arg 1", "arg 2"])
first_output = first_result.stdout
self.assertIn("Downloading batect", first_output)
self.assertIn("BATECT_WRAPPER_SCRIPT_DIR is: {}\\\n".format(self.get_script_dir()), first_output)
self.assertIn("HOSTNAME is: {}\n".format(os.environ['COMPUTERNAME']), first_output)
self.assertIn("I received 2 arguments.\narg 1\narg 2\n", first_output)
self.assertEqual(first_result.returncode, 0)
second_result = self.run_script(["arg 3", "arg 4"])
second_output = second_result.stdout
self.assertNotIn("Downloading batect", second_output)
self.assertIn("BATECT_WRAPPER_SCRIPT_DIR is: {}\\\n".format(self.get_script_dir()), second_output)
self.assertIn("HOSTNAME is: {}\n".format(os.environ['COMPUTERNAME']), second_output)
self.assertIn("I received 2 arguments.\narg 3\narg 4\n", second_output)
        self.assertEqual(second_result.returncode, 0)
def test_download_fails(self):
result = self.run_script(["arg 1", "arg 2"], download_url=self.default_download_url + "-does-not-exist")
self.assertIn("Downloading batect", result.stdout)
self.assertIn("(404) Not Found", result.stdout)
self.assertNotIn("WARNING: you should never see this", result.stdout)
self.assertNotEqual(result.returncode, 0)
def test_no_java(self):
path_dir = self.create_limited_path()
result = self.run_script([], path=path_dir)
self.assertIn("Java is not installed or not on your PATH. Please install it and try again.", result.stdout)
self.assertNotEqual(result.returncode, 0)
def test_unsupported_java(self):
path_dir = self.create_limited_path_for_specific_java_version("7")
result = self.run_script([], path=path_dir)
self.assertIn("The version of Java that is available on your PATH is version 1.7, but version 1.8 or greater is required.\n" +
"If you have a newer version of Java installed, please make sure your PATH is set correctly.", result.stdout)
self.assertNotIn("The application has started.", result.stdout)
self.assertNotEqual(result.returncode, 0)
def test_32bit_java(self):
path_dir = self.create_limited_path_for_specific_java_version("8-32bit")
result = self.run_script([], path=path_dir)
self.assertIn("The version of Java that is available on your PATH is a 32-bit version, but batect requires a 64-bit Java runtime.\n" +
"If you have a 64-bit version of Java installed, please make sure your PATH is set correctly.", result.stdout)
self.assertNotIn("The application has started.", result.stdout)
self.assertNotEqual(result.returncode, 0)
def test_supported_java(self):
opens_args = "Args are: \"--add-opens\" \"java.base/sun.nio.ch=ALL-UNNAMED\" \"--add-opens\" \"java.base/java.io=ALL-UNNAMED\""
for version in [8, 9, 10, 11]:
with self.subTest(java_version=version):
path_dir = self.create_limited_path_for_specific_java_version(version)
result = self.run_script([], path=path_dir)
self.assertIn("The application has started.", result.stdout)
if version >= 9:
self.assertIn(opens_args, result.stdout)
else:
self.assertNotIn(opens_args, result.stdout)
self.assertEqual(result.returncode, 0)
def test_non_zero_exit(self):
result = self.run_script(["exit-non-zero"])
output = result.stdout
self.assertIn("The Java application has started.", output)
self.assertNotIn("WARNING: you should never see this", output)
self.assertEqual(result.returncode, 123)
def test_no_args(self):
result = self.run_script([])
output = result.stdout
self.assertIn("Downloading batect", output)
self.assertIn("BATECT_WRAPPER_SCRIPT_DIR is: {}\\\n".format(self.get_script_dir()), output)
self.assertIn("HOSTNAME is: {}\n".format(os.environ['COMPUTERNAME']), output)
self.assertIn("I received 0 arguments.\n", output)
self.assertNotIn("WARNING: you should never see this", output)
self.assertEqual(result.returncode, 0)
def test_one_arg(self):
result = self.run_script(["arg1"])
output = result.stdout
self.assertIn("Downloading batect", output)
self.assertIn("BATECT_WRAPPER_SCRIPT_DIR is: {}\\\n".format(self.get_script_dir()), output)
self.assertIn("HOSTNAME is: {}\n".format(os.environ['COMPUTERNAME']), output)
self.assertIn("I received 1 arguments.\narg1\n", output)
self.assertNotIn("WARNING: you should never see this", output)
self.assertEqual(result.returncode, 0)
def create_limited_path(self):
powershellDir = os.path.join(os.environ["SYSTEMROOT"], "System32", "WindowsPowerShell", "v1.0")
return powershellDir
def create_limited_path_for_specific_java_version(self, version):
javaDir = os.path.join(self.get_tests_dir(), "fakes", "java" + str(version))
return ";".join([
self.create_limited_path(),
javaDir
])
def run_script(self, args, download_url=default_download_url, path=os.environ["PATH"]):
env = {
**os.environ,
"BATECT_CACHE_DIR": self.cache_dir,
"BATECT_DOWNLOAD_URL": download_url,
"PATH": path
}
path = self.get_script_path()
command = [path] + args
return subprocess.run(command, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, env=env, text=True, encoding='utf-8') # utf-16le
def get_tests_dir(self):
return os.path.dirname(os.path.realpath(__file__))
def get_script_dir(self):
return os.path.abspath(os.path.join(self.get_tests_dir(), "..", "build", "scripts"))
def get_script_path(self):
return os.path.join(self.get_script_dir(), "batect.cmd")
def start_server(self):
self.server = http.server.HTTPServer(("", self.http_port), QuietHTTPHandler)
threading.Thread(target=self.server.serve_forever, daemon=True).start()
def stop_server(self):
self.server.shutdown()
self.server.server_close()
class QuietHTTPHandler(http.server.SimpleHTTPRequestHandler):
def log_message(self, format, *args):
pass
if __name__ == '__main__':
unittest.main()
|
charleskorn/batect
|
wrapper/windows/test/tests.py
|
Python
|
apache-2.0
| 7,673
|
from django.conf import settings
from django.contrib.sites.shortcuts import get_current_site
from django.http import Http404
from django.utils.translation import ugettext_lazy as _
from django_filters.rest_framework import DjangoFilterBackend
from rest_framework import serializers
from rest_framework.decorators import action
from rest_framework.exceptions import NotFound
from rest_framework.mixins import (CreateModelMixin, ListModelMixin,
RetrieveModelMixin, UpdateModelMixin)
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from rest_framework.viewsets import (GenericViewSet, ModelViewSet,
ReadOnlyModelViewSet)
from rest_framework_extensions.cache.mixins import RetrieveCacheResponseMixin
from rest_framework_extensions.mixins import NestedViewSetMixin
from rdmo.conditions.models import Condition
from rdmo.core.permissions import HasModelPermission, HasObjectPermission
from rdmo.core.utils import human2bytes, return_file_response
from rdmo.options.models import OptionSet
from rdmo.questions.models import Catalog, Question, QuestionSet
from .filters import SnapshotFilterBackend, ValueFilterBackend
from .models import (Continuation, Integration, Issue, Membership, Project,
Snapshot, Value)
from .serializers.v1 import (IntegrationSerializer, IssueSerializer,
MembershipSerializer,
ProjectIntegrationSerializer,
ProjectIssueSerializer,
ProjectMembershipSerializer,
ProjectMembershipUpdateSerializer,
ProjectSerializer, ProjectSnapshotSerializer,
ProjectValueSerializer, SnapshotSerializer,
ValueSerializer)
from .serializers.v1.overview import ProjectOverviewSerializer
from .serializers.v1.questionset import QuestionSetSerializer
class ProjectViewSet(ModelViewSet):
permission_classes = (HasModelPermission | HasObjectPermission, )
serializer_class = ProjectSerializer
filter_backends = (DjangoFilterBackend,)
filterset_fields = (
'title',
'user',
'user__username',
'catalog',
'catalog__uri',
'catalog__key',
)
def get_queryset(self):
return Project.objects.filter_user(self.request.user)
@action(detail=True, permission_classes=(IsAuthenticated, ))
def overview(self, request, pk=None):
project = self.get_object()
project.catalog = Catalog.objects.prefetch_related(
'sections',
'sections__questionsets',
'sections__questionsets__questions'
).get(id=project.catalog_id)
serializer = ProjectOverviewSerializer(project, context={'request': request})
return Response(serializer.data)
@action(detail=True, permission_classes=(HasModelPermission | HasObjectPermission, ))
def resolve(self, request, pk=None):
try:
condition = Condition.objects.get(pk=request.GET.get('condition'))
return Response({'result': condition.resolve(self.get_object(), None)})
except Condition.DoesNotExist:
return Response({'result': False})
@action(detail=True, permission_classes=(HasModelPermission | HasObjectPermission, ))
def options(self, request, pk=None):
project = self.get_object()
try:
optionset = OptionSet.objects.get(pk=request.GET.get('optionset'))
# check if the optionset belongs to this catalog and if it has a provider
if Question.objects.filter_by_catalog(project.catalog).filter(optionsets=optionset) and \
optionset.provider is not None:
options = optionset.provider.get_options(project)
return Response(options)
except OptionSet.DoesNotExist:
pass
# if it didn't work return 404
raise NotFound()
@action(detail=True, permission_classes=(IsAuthenticated, ))
def progress(self, request, pk=None):
project = self.get_object()
return Response(project.progress)
def perform_create(self, serializer):
project = serializer.save(site=get_current_site(self.request))
# add current user as owner
membership = Membership(project=project, user=self.request.user, role='owner')
membership.save()
class ProjectNestedViewSetMixin(NestedViewSetMixin):
def initial(self, request, *args, **kwargs):
self.project = self.get_project_from_parent_viewset()
super().initial(request, *args, **kwargs)
def get_project_from_parent_viewset(self):
try:
return Project.objects.filter_user(self.request.user).get(pk=self.get_parents_query_dict().get('project'))
except Project.DoesNotExist:
raise Http404
def get_list_permission_object(self):
return self.project
def get_detail_permission_object(self, obj):
return self.project
def perform_create(self, serializer):
serializer.save(project=self.project)
class ProjectMembershipViewSet(ProjectNestedViewSetMixin, ModelViewSet):
permission_classes = (HasModelPermission | HasObjectPermission, )
filter_backends = (DjangoFilterBackend, )
filterset_fields = (
'user',
'user__username',
'role'
)
def get_queryset(self):
try:
return Membership.objects.filter(project=self.project)
except AttributeError:
# this is needed for the swagger ui
return Membership.objects.none()
def get_serializer_class(self):
if self.action == 'update':
return ProjectMembershipUpdateSerializer
else:
return ProjectMembershipSerializer
class ProjectIntegrationViewSet(ProjectNestedViewSetMixin, ModelViewSet):
permission_classes = (HasModelPermission | HasObjectPermission, )
serializer_class = ProjectIntegrationSerializer
filter_backends = (DjangoFilterBackend, )
filterset_fields = (
'provider_key',
)
def get_queryset(self):
try:
return Integration.objects.filter(project=self.project)
except AttributeError:
# this is needed for the swagger ui
return Integration.objects.none()
class ProjectIssueViewSet(ProjectNestedViewSetMixin, ListModelMixin, RetrieveModelMixin,
UpdateModelMixin, GenericViewSet):
permission_classes = (HasModelPermission | HasObjectPermission, )
serializer_class = ProjectIssueSerializer
filter_backends = (DjangoFilterBackend, )
filterset_fields = (
'task',
'task__uri',
'status'
)
def get_queryset(self):
try:
return Issue.objects.filter(project=self.project).prefetch_related('resources')
except AttributeError:
# this is needed for the swagger ui
return Issue.objects.none()
class ProjectSnapshotViewSet(ProjectNestedViewSetMixin, CreateModelMixin, RetrieveModelMixin,
UpdateModelMixin, ListModelMixin, GenericViewSet):
permission_classes = (HasModelPermission | HasObjectPermission, )
serializer_class = ProjectSnapshotSerializer
def get_queryset(self):
try:
return self.project.snapshots.all()
except AttributeError:
# this is needed for the swagger ui
return Snapshot.objects.none()
class ProjectValueViewSet(ProjectNestedViewSetMixin, ModelViewSet):
permission_classes = (HasModelPermission | HasObjectPermission, )
serializer_class = ProjectValueSerializer
filter_backends = (ValueFilterBackend, DjangoFilterBackend)
filterset_fields = (
'attribute',
'attribute__path',
'option',
'option__path',
)
def get_queryset(self):
try:
return self.project.values.filter(snapshot=None)
except AttributeError:
# this is needed for the swagger ui
return Value.objects.none()
@action(detail=True, methods=['GET', 'POST'],
permission_classes=(HasModelPermission | HasObjectPermission, ))
def file(self, request, parent_lookup_project, pk=None):
value = self.get_object()
if request.method == 'POST':
value.file = request.FILES.get('file')
            # check whether the file quota for this project would be exceeded
if value.file and value.file.size + value.project.file_size > human2bytes(settings.PROJECT_FILE_QUOTA):
raise serializers.ValidationError({
'value': [_('You reached the file quota for this project.')]
})
value.save()
serializer = self.get_serializer(value)
return Response(serializer.data)
else:
if value.file:
return return_file_response(value.file.name, value.file_type)
# if it didn't work return 404
raise NotFound()
class ProjectQuestionSetViewSet(ProjectNestedViewSetMixin, RetrieveCacheResponseMixin, RetrieveModelMixin, GenericViewSet):
permission_classes = (IsAuthenticated, )
serializer_class = QuestionSetSerializer
def get_queryset(self):
return QuestionSet.objects.order_by_catalog(self.project.catalog)
def dispatch(self, *args, **kwargs):
response = super().dispatch(*args, **kwargs)
if response.status_code == 200 and kwargs.get('pk'):
try:
continuation = Continuation.objects.get(project=self.project, user=self.request.user)
except Continuation.DoesNotExist:
continuation = Continuation(project=self.project, user=self.request.user)
continuation.questionset_id = kwargs.get('pk')
continuation.save()
return response
@action(detail=False, url_path='continue', permission_classes=(IsAuthenticated, ))
def get_continue(self, request, pk=None, parent_lookup_project=None):
try:
continuation = Continuation.objects.get(project=self.project, user=self.request.user)
questionset = continuation.questionset
except Continuation.DoesNotExist:
questionset = self.get_queryset().first()
serializer = self.get_serializer(questionset)
return Response(serializer.data)
class MembershipViewSet(ReadOnlyModelViewSet):
permission_classes = (HasModelPermission | HasObjectPermission, )
serializer_class = MembershipSerializer
filter_backends = (DjangoFilterBackend,)
filterset_fields = (
'user',
'user__username',
'role'
)
def get_queryset(self):
return Membership.objects.filter_user(self.request.user)
def get_detail_permission_object(self, obj):
return obj.project
class IntegrationViewSet(ReadOnlyModelViewSet):
permission_classes = (HasModelPermission | HasObjectPermission, )
serializer_class = IntegrationSerializer
filter_backends = (DjangoFilterBackend, )
filterset_fields = (
'project',
'provider_key'
)
def get_queryset(self):
return Integration.objects.filter_user(self.request.user)
def get_detail_permission_object(self, obj):
return obj.project
class IssueViewSet(ReadOnlyModelViewSet):
permission_classes = (HasModelPermission | HasObjectPermission, )
serializer_class = IssueSerializer
filter_backends = (DjangoFilterBackend, )
filterset_fields = (
'task',
'task__uri',
'status'
)
def get_queryset(self):
return Issue.objects.filter_user(self.request.user).prefetch_related('resources')
def get_detail_permission_object(self, obj):
return obj.project
class SnapshotViewSet(ReadOnlyModelViewSet):
permission_classes = (HasModelPermission | HasObjectPermission, )
serializer_class = SnapshotSerializer
filter_backends = (DjangoFilterBackend,)
filterset_fields = (
'title',
'project'
)
def get_queryset(self):
return Snapshot.objects.filter_user(self.request.user)
def get_detail_permission_object(self, obj):
return obj.project
class ValueViewSet(ReadOnlyModelViewSet):
permission_classes = (HasModelPermission | HasObjectPermission, )
serializer_class = ValueSerializer
filter_backends = (SnapshotFilterBackend, DjangoFilterBackend)
filterset_fields = (
'project',
'attribute',
'attribute__path',
'option',
'option__path',
)
def get_queryset(self):
return Value.objects.filter_user(self.request.user)
def get_detail_permission_object(self, obj):
return obj.project
@action(detail=True, permission_classes=(HasModelPermission | HasObjectPermission, ))
def file(self, request, pk=None):
value = self.get_object()
if value.file:
return return_file_response(value.file.name, value.file_type)
# if it didn't work return 404
raise NotFound()
|
DMPwerkzeug/DMPwerkzeug
|
rdmo/projects/viewsets.py
|
Python
|
apache-2.0
| 13,206
|
#!/usr/bin/env python
# **********************************************************************
#
# Copyright (c) 2003-2011 ZeroC, Inc. All rights reserved.
#
# This copy of Ice is licensed to you under the terms described in the
# ICE_LICENSE file included in this distribution.
#
# **********************************************************************
import sys, Ice, time
class Client(Ice.Application):
def interruptCallback(self, sig):
print "handling signal " + str(sig)
# SIGINT interrupts time.sleep so a custom method is needed to
# sleep for a given interval.
def sleep(self, interval):
start = time.time()
while True:
sleepTime = (start + interval) - time.time()
if sleepTime <= 0:
break
time.sleep(sleepTime)
def run(self, args):
self.ignoreInterrupt()
print "Ignore CTRL+C and the like for 5 seconds (try it!)"
self.sleep(5)
self.callbackOnInterrupt()
self.holdInterrupt()
print "Hold CTRL+C and the like for 5 seconds (try it!)"
self.sleep(5)
self.releaseInterrupt()
print "Release CTRL+C (any held signals should be released)"
self.sleep(5)
self.holdInterrupt()
print "Hold CTRL+C and the like for 5 seconds (try it!)"
self.sleep(5)
self.callbackOnInterrupt()
print "Release CTRL+C (any held signals should be released)"
self.sleep(5)
self.shutdownOnInterrupt()
print "Test shutdown on destroy. Press CTRL+C to shutdown & terminate"
self.communicator().waitForShutdown()
print "ok"
return False
app = Client()
sys.exit(app.main(sys.argv))
|
joshmoore/zeroc-ice
|
py/test/Ice/application/Client.py
|
Python
|
gpl-2.0
| 1,730
|
# © 2016 Tecnativa - Vicent Cubells
# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl-3.0).
from odoo.exceptions import UserError
from odoo.tests import common
class TestRecursion(common.SavepointCase):
@classmethod
def setUpClass(cls):
super(TestRecursion, cls).setUpClass()
cls.department_obj = cls.env["res.partner.department"]
# Instances
cls.dpt1 = cls.department_obj.create({"name": "Dpt. 1"})
cls.dpt2 = cls.department_obj.create(
{"name": "Dep. 2", "parent_id": cls.dpt1.id}
)
def test_recursion(self):
""" Testing recursion """
self.dpt3 = self.department_obj.create(
{"name": "Dep. 3", "parent_id": self.dpt2.id}
)
        # Setting a descendant (dpt3) as the parent of dpt1 creates a cycle.
with self.assertRaises(UserError):
self.dpt1.write(vals={"parent_id": self.dpt3.id})
|
OCA/partner-contact
|
partner_contact_department/tests/test_recursion.py
|
Python
|
agpl-3.0
| 915
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.1 on 2016-02-09 20:48
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('elearning', '0010_auto_20160209_2042'),
]
operations = [
migrations.AddField(
model_name='setting',
name='logo',
field=models.CharField(max_length=256, null=True),
),
]
|
tkupek/tkupek-elearning
|
tkupek_elearning/elearning/migrations/0011_setting_logo.py
|
Python
|
gpl-3.0
| 461
|
# -*- coding: utf-8 -
#
# This file is part of gunicorn released under the MIT license.
# See the NOTICE for more information.
import copy
import grp
import inspect
try:
import argparse
except ImportError: # python 2.6
from . import argparse_compat as argparse
import os
import pwd
import sys
import textwrap
import types
from gunicorn import __version__
from gunicorn.errors import ConfigError
from gunicorn import six
from gunicorn import util
KNOWN_SETTINGS = []
PLATFORM = sys.platform
def wrap_method(func):
def _wrapped(instance, *args, **kwargs):
return func(*args, **kwargs)
return _wrapped
def make_settings(ignore=None):
settings = {}
ignore = ignore or ()
for s in KNOWN_SETTINGS:
setting = s()
if setting.name in ignore:
continue
settings[setting.name] = setting.copy()
return settings
class Config(object):
def __init__(self, usage=None, prog=None):
self.settings = make_settings()
self.usage = usage
self.prog = prog or os.path.basename(sys.argv[0])
self.env_orig = os.environ.copy()
def __getattr__(self, name):
if name not in self.settings:
raise AttributeError("No configuration setting for: %s" % name)
return self.settings[name].get()
def __setattr__(self, name, value):
if name != "settings" and name in self.settings:
raise AttributeError("Invalid access!")
super(Config, self).__setattr__(name, value)
def set(self, name, value):
if name not in self.settings:
raise AttributeError("No configuration setting for: %s" % name)
self.settings[name].set(value)
def parser(self):
kwargs = {
"usage": self.usage,
"prog": self.prog
}
parser = argparse.ArgumentParser(**kwargs)
parser.add_argument("-v", "--version",
action="version", default=argparse.SUPPRESS,
version="%(prog)s (version " + __version__ + ")\n",
help="show program's version number and exit")
parser.add_argument("args", nargs="*", help=argparse.SUPPRESS)
        keys = sorted(self.settings, key=self.settings.__getitem__)
for k in keys:
self.settings[k].add_option(parser)
return parser
@property
def worker_class(self):
uri = self.settings['worker_class'].get()
worker_class = util.load_class(uri)
if hasattr(worker_class, "setup"):
worker_class.setup()
return worker_class
@property
def workers(self):
return self.settings['workers'].get()
@property
def address(self):
s = self.settings['bind'].get()
return [util.parse_address(six.bytes_to_str(bind)) for bind in s]
@property
def uid(self):
return self.settings['user'].get()
@property
def gid(self):
return self.settings['group'].get()
@property
def proc_name(self):
pn = self.settings['proc_name'].get()
if pn is not None:
return pn
else:
return self.settings['default_proc_name'].get()
@property
def logger_class(self):
uri = self.settings['logger_class'].get()
logger_class = util.load_class(uri, default="simple",
section="gunicorn.loggers")
if hasattr(logger_class, "install"):
logger_class.install()
return logger_class
@property
def is_ssl(self):
return self.certfile or self.keyfile
@property
def ssl_options(self):
opts = {}
if self.certfile:
opts['certfile'] = self.certfile
if self.keyfile:
opts['keyfile'] = self.keyfile
return opts
@property
def env(self):
raw_env = self.settings['raw_env'].get()
env = {}
if not raw_env:
return env
for e in raw_env:
s = six.bytes_to_str(e)
try:
                k, v = s.split('=', 1)  # split on the first '=' only, so values may contain '='
except ValueError:
raise RuntimeError("environement setting %r invalid" % s)
env[k] = v
return env
class SettingMeta(type):
def __new__(cls, name, bases, attrs):
super_new = super(SettingMeta, cls).__new__
parents = [b for b in bases if isinstance(b, SettingMeta)]
if not parents:
return super_new(cls, name, bases, attrs)
attrs["order"] = len(KNOWN_SETTINGS)
attrs["validator"] = wrap_method(attrs["validator"])
new_class = super_new(cls, name, bases, attrs)
new_class.fmt_desc(attrs.get("desc", ""))
KNOWN_SETTINGS.append(new_class)
return new_class
def fmt_desc(cls, desc):
desc = textwrap.dedent(desc).strip()
setattr(cls, "desc", desc)
setattr(cls, "short", desc.splitlines()[0])
class Setting(object):
name = None
value = None
section = None
cli = None
validator = None
type = None
meta = None
action = None
default = None
short = None
desc = None
nargs = None
const = None
def __init__(self):
if self.default is not None:
self.set(self.default)
def add_option(self, parser):
if not self.cli:
return
args = tuple(self.cli)
help_txt = "%s [%s]" % (self.short, self.default)
help_txt = help_txt.replace("%", "%%")
kwargs = {
"dest": self.name,
"action": self.action or "store",
"type": self.type or str,
"default": None,
"help": help_txt
}
if self.meta is not None:
kwargs['metavar'] = self.meta
if kwargs["action"] != "store":
kwargs.pop("type")
if self.nargs is not None:
kwargs["nargs"] = self.nargs
if self.const is not None:
kwargs["const"] = self.const
parser.add_argument(*args, **kwargs)
def copy(self):
return copy.copy(self)
def get(self):
return self.value
def set(self, val):
assert six.callable(self.validator), "Invalid validator: %s" % self.name
self.value = self.validator(val)
def __lt__(self, other):
return (self.section == other.section and
self.order < other.order)
__cmp__ = __lt__
Setting = SettingMeta('Setting', (Setting,), {})
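# The line above applies SettingMeta to Setting in a way that works on both
# Python 2 and Python 3. A hypothetical new setting (a sketch, not part of
# gunicorn) registers itself simply by subclassing Setting:
#
#   class FooBar(Setting):
#       name = "foo_bar"
#       section = "Custom"
#       cli = ["--foo-bar"]
#       validator = validate_string
#       default = "spam"
#       desc = """\
#           A demo setting; the first line becomes the --help summary."""
#
#   KNOWN_SETTINGS[-1] is FooBar    # -> True (appended by the metaclass)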
def validate_bool(val):
if isinstance(val, bool):
return val
if not isinstance(val, six.string_types):
raise TypeError("Invalid type for casting: %s" % val)
if val.lower().strip() == "true":
return True
elif val.lower().strip() == "false":
return False
else:
raise ValueError("Invalid boolean: %s" % val)
def validate_dict(val):
if not isinstance(val, dict):
raise TypeError("Value is not a dictionary: %s " % val)
return val
def validate_pos_int(val):
if not isinstance(val, six.integer_types):
val = int(val, 0)
else:
# Booleans are ints!
val = int(val)
if val < 0:
raise ValueError("Value must be positive: %s" % val)
return val
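# Examples:
#   validate_pos_int(8)       -> 8
#   validate_pos_int(0)       -> 0    (zero is accepted despite the name)
#   validate_pos_int("0x1f")  -> 31   (strings are parsed with int(val, 0),
#                                      so hex/octal prefixes are accepted)
#   validate_pos_int(True)    -> 1    (booleans are ints)
#   validate_pos_int(-1)      -> ValueError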
def validate_string(val):
if val is None:
return None
if not isinstance(val, six.string_types):
raise TypeError("Not a string: %s" % val)
return val.strip()
def validate_list_string(val):
if not val:
return []
# legacy syntax
if isinstance(val, six.string_types):
val = [val]
return [validate_string(v) for v in val]
def validate_string_to_list(val):
val = validate_string(val)
if not val:
return []
return [v.strip() for v in val.split(",") if v]
def validate_class(val):
if inspect.isfunction(val) or inspect.ismethod(val):
val = val()
if inspect.isclass(val):
return val
return validate_string(val)
def validate_callable(arity):
def _validate_callable(val):
if isinstance(val, six.string_types):
try:
mod_name, obj_name = val.rsplit(".", 1)
except ValueError:
raise TypeError("Value '%s' is not import string. "
"Format: module[.submodules...].object" % val)
try:
mod = __import__(mod_name, fromlist=[obj_name])
val = getattr(mod, obj_name)
except ImportError as e:
raise TypeError(str(e))
except AttributeError:
raise TypeError("Can not load '%s' from '%s'"
"" % (obj_name, mod_name))
if not six.callable(val):
raise TypeError("Value is not six.callable: %s" % val)
if arity != -1 and arity != len(inspect.getargspec(val)[0]):
raise TypeError("Value must have an arity of: %s" % arity)
return val
return _validate_callable
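# Example (a sketch): a dotted import string is resolved and the arity of
# the resulting callable is checked via inspect.getargspec:
#   validate_callable(1)("os.path.exists")    # -> <function exists>
#   validate_callable(1)(lambda a, b: None)   # -> TypeError (arity 2, not 1)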
def validate_user(val):
if val is None:
return os.geteuid()
if isinstance(val, int):
return val
elif val.isdigit():
return int(val)
else:
try:
return pwd.getpwnam(val).pw_uid
except KeyError:
raise ConfigError("No such user: '%s'" % val)
def validate_group(val):
if val is None:
return os.getegid()
if isinstance(val, int):
return val
elif val.isdigit():
return int(val)
else:
try:
return grp.getgrnam(val).gr_gid
except KeyError:
raise ConfigError("No such group: '%s'" % val)
def validate_post_request(val):
val = validate_callable(-1)(val)
largs = len(inspect.getargspec(val)[0])
if largs == 4:
return val
elif largs == 3:
return lambda worker, req, env, _r: val(worker, req, env)
elif largs == 2:
return lambda worker, req, _e, _r: val(worker, req)
else:
raise TypeError("Value must have an arity of: 4")
def validate_chdir(val):
# valid if the value is a string
val = validate_string(val)
# transform relative paths
path = os.path.abspath(os.path.normpath(os.path.join(util.getcwd(), val)))
# test if the path exists
if not os.path.exists(path):
raise ConfigError("can't chdir to %r" % val)
return path
def validate_file(val):
if val is None:
return None
# valid if the value is a string
val = validate_string(val)
# transform relative paths
path = os.path.abspath(os.path.normpath(os.path.join(util.getcwd(), val)))
# test if the path exists
if not os.path.exists(path):
raise ConfigError("%r not found" % val)
return path
def get_default_config_file():
config_path = os.path.join(os.path.abspath(os.getcwd()),
'gunicorn.conf.py')
if os.path.exists(config_path):
return config_path
return None
class ConfigFile(Setting):
name = "config"
section = "Config File"
cli = ["-c", "--config"]
meta = "FILE"
validator = validate_string
default = None
desc = """\
The path to a Gunicorn config file.
Only has an effect when specified on the command line or as part of an
application specific configuration.
"""
class Bind(Setting):
name = "bind"
action = "append"
section = "Server Socket"
cli = ["-b", "--bind"]
meta = "ADDRESS"
validator = validate_list_string
if 'PORT' in os.environ:
default = ['0.0.0.0:{0}'.format(os.environ.get('PORT'))]
else:
default = ['127.0.0.1:8000']
desc = """\
The socket to bind.
A string of the form: 'HOST', 'HOST:PORT', 'unix:PATH'. An IP is a valid
HOST.
Multiple addresses can be bound. ex.::
$ gunicorn -b 127.0.0.1:8000 -b [::1]:8000 test:app
will bind the `test:app` application on localhost both on ipv6
and ipv4 interfaces.
"""
class Backlog(Setting):
name = "backlog"
section = "Server Socket"
cli = ["--backlog"]
meta = "INT"
validator = validate_pos_int
type = int
default = 2048
desc = """\
The maximum number of pending connections.
This refers to the number of clients that can be waiting to be served.
Exceeding this number results in the client getting an error when
attempting to connect. It should only affect servers under significant
load.
Must be a positive integer. Generally set in the 64-2048 range.
"""
class Workers(Setting):
name = "workers"
section = "Worker Processes"
cli = ["-w", "--workers"]
meta = "INT"
validator = validate_pos_int
type = int
default = 1
desc = """\
    The number of worker processes for handling requests.
A positive integer generally in the 2-4 x $(NUM_CORES) range. You'll
want to vary this a bit to find the best for your particular
    application's workload.
"""
class WorkerClass(Setting):
name = "worker_class"
section = "Worker Processes"
cli = ["-k", "--worker-class"]
meta = "STRING"
validator = validate_class
default = "sync"
desc = """\
The type of workers to use.
The default class (sync) should handle most 'normal' types of
workloads. You'll want to read
http://docs.gunicorn.org/en/latest/design.html for information
on when you might want to choose one of the other worker
classes.
A string referring to one of the following bundled classes:
* ``sync``
* ``eventlet`` - Requires eventlet >= 0.9.7
* ``gevent`` - Requires gevent >= 0.12.2 (?)
* ``tornado`` - Requires tornado >= 0.2
Optionally, you can provide your own worker by giving gunicorn a
    python path to a subclass of gunicorn.workers.base.Worker. For example,
    the gevent class can be loaded either with the full path
    ``gunicorn.workers.ggevent.GeventWorker`` or with the shorthand
    ``egg:gunicorn#gevent``.
"""
class WorkerConnections(Setting):
name = "worker_connections"
section = "Worker Processes"
cli = ["--worker-connections"]
meta = "INT"
validator = validate_pos_int
type = int
default = 1000
desc = """\
The maximum number of simultaneous clients.
This setting only affects the Eventlet and Gevent worker types.
"""
class MaxRequests(Setting):
name = "max_requests"
section = "Worker Processes"
cli = ["--max-requests"]
meta = "INT"
validator = validate_pos_int
type = int
default = 0
desc = """\
The maximum number of requests a worker will process before restarting.
    Any value greater than zero will limit the number of requests a worker
will process before automatically restarting. This is a simple method
to help limit the damage of memory leaks.
If this is set to zero (the default) then the automatic worker
restarts are disabled.
"""
class Timeout(Setting):
name = "timeout"
section = "Worker Processes"
cli = ["-t", "--timeout"]
meta = "INT"
validator = validate_pos_int
type = int
default = 30
desc = """\
Workers silent for more than this many seconds are killed and restarted.
Generally set to thirty seconds. Only set this noticeably higher if
you're sure of the repercussions for sync workers. For the non sync
workers it just means that the worker process is still communicating and
is not tied to the length of time required to handle a single request.
"""
class GracefulTimeout(Setting):
name = "graceful_timeout"
section = "Worker Processes"
cli = ["--graceful-timeout"]
meta = "INT"
validator = validate_pos_int
type = int
default = 30
desc = """\
Timeout for graceful workers restart.
    Generally set to thirty seconds. This is the maximum time a worker can
    keep handling requests after receiving a restart signal. Once the time
    is up, the worker is force killed.
"""
class Keepalive(Setting):
name = "keepalive"
section = "Worker Processes"
cli = ["--keep-alive"]
meta = "INT"
validator = validate_pos_int
type = int
default = 2
desc = """\
The number of seconds to wait for requests on a Keep-Alive connection.
Generally set in the 1-5 seconds range.
"""
class LimitRequestLine(Setting):
name = "limit_request_line"
section = "Security"
cli = ["--limit-request-line"]
meta = "INT"
validator = validate_pos_int
type = int
default = 4094
desc = """\
The maximum size of HTTP request line in bytes.
This parameter is used to limit the allowed size of a client's
HTTP request-line. Since the request-line consists of the HTTP
method, URI, and protocol version, this directive places a
restriction on the length of a request-URI allowed for a request
on the server. A server needs this value to be large enough to
hold any of its resource names, including any information that
might be passed in the query part of a GET request. Value is a number
from 0 (unlimited) to 8190.
    This parameter can be used to help prevent DDoS attacks.
"""
class LimitRequestFields(Setting):
name = "limit_request_fields"
section = "Security"
cli = ["--limit-request-fields"]
meta = "INT"
validator = validate_pos_int
type = int
default = 100
desc = """\
Limit the number of HTTP headers fields in a request.
This parameter is used to limit the number of headers in a request to
    help prevent DDoS attacks. Used together with `limit_request_field_size`,
    it improves safety. By default this value is 100 and it can't be larger
    than 32768.
"""
class LimitRequestFieldSize(Setting):
name = "limit_request_field_size"
section = "Security"
cli = ["--limit-request-field_size"]
meta = "INT"
validator = validate_pos_int
type = int
default = 8190
desc = """\
Limit the allowed size of an HTTP request header field.
    Value is a number from 0 (unlimited) to 8190, setting the limit on
    the allowed size of an HTTP request header field.
"""
class Debug(Setting):
name = "debug"
section = "Debugging"
cli = ["--debug"]
validator = validate_bool
action = "store_true"
default = False
desc = """\
Turn on debugging in the server.
This limits the number of worker processes to 1 and changes some error
handling that's sent to clients.
"""
class Spew(Setting):
name = "spew"
section = "Debugging"
cli = ["--spew"]
validator = validate_bool
action = "store_true"
default = False
desc = """\
Install a trace function that spews every line executed by the server.
This is the nuclear option.
"""
class ConfigCheck(Setting):
name = "check_config"
section = "Debugging"
cli = ["--check-config", ]
validator = validate_bool
action = "store_true"
default = False
desc = """\
    Check the configuration.
"""
class PreloadApp(Setting):
name = "preload_app"
section = "Server Mechanics"
cli = ["--preload"]
validator = validate_bool
action = "store_true"
default = False
desc = """\
Load application code before the worker processes are forked.
By preloading an application you can save some RAM resources as well as
    speed up server boot times. If you instead defer application loading
to each worker process, you can reload your application code easily by
restarting workers.
"""
class Sendfile(Setting):
name = "sendfile"
section = "Server Mechanics"
cli = ["--no-sendfile"]
validator = validate_bool
action = "store_false"
default = True
desc = """\
Disables the use of ``sendfile()``.
.. versionadded:: 18.0.0
Backported from 19.x
"""
class Chdir(Setting):
name = "chdir"
section = "Server Mechanics"
cli = ["--chdir"]
validator = validate_chdir
default = util.getcwd()
desc = """\
Chdir to specified directory before apps loading.
"""
class Daemon(Setting):
name = "daemon"
section = "Server Mechanics"
cli = ["-D", "--daemon"]
validator = validate_bool
action = "store_true"
default = False
desc = """\
Daemonize the Gunicorn process.
Detaches the server from the controlling terminal and enters the
background.
"""
class Env(Setting):
name = "raw_env"
action = "append"
section = "Server Mechanic"
cli = ["-e", "--env"]
meta = "ENV"
validator = validate_list_string
default = []
desc = """\
Set environment variable (key=value).
Pass variables to the execution environment. Ex.::
$ gunicorn -b 127.0.0.1:8000 --env FOO=1 test:app
    and test for the FOO environment variable in your application.
"""
class Pidfile(Setting):
name = "pidfile"
section = "Server Mechanics"
cli = ["-p", "--pid"]
meta = "FILE"
validator = validate_string
default = None
desc = """\
A filename to use for the PID file.
If not set, no PID file will be written.
"""
class User(Setting):
name = "user"
section = "Server Mechanics"
cli = ["-u", "--user"]
meta = "USER"
validator = validate_user
default = os.geteuid()
desc = """\
Switch worker processes to run as this user.
A valid user id (as an integer) or the name of a user that can be
retrieved with a call to pwd.getpwnam(value) or None to not change
the worker process user.
"""
class Group(Setting):
name = "group"
section = "Server Mechanics"
cli = ["-g", "--group"]
meta = "GROUP"
validator = validate_group
default = os.getegid()
desc = """\
    Switch worker processes to run as this group.
    A valid group id (as an integer) or the name of a group that can be
    retrieved with a call to grp.getgrnam(value) or None to not change
the worker processes group.
"""
class Umask(Setting):
name = "umask"
section = "Server Mechanics"
cli = ["-m", "--umask"]
meta = "INT"
validator = validate_pos_int
type = int
default = 0
desc = """\
A bit mask for the file mode on files written by Gunicorn.
Note that this affects unix socket permissions.
A valid value for the os.umask(mode) call or a string compatible with
int(value, 0) (0 means Python guesses the base, so values like "0",
"0xFF", "0022" are valid for decimal, hex, and octal representations)
"""
class TmpUploadDir(Setting):
name = "tmp_upload_dir"
section = "Server Mechanics"
meta = "DIR"
validator = validate_string
default = None
desc = """\
Directory to store temporary request data as they are read.
This may disappear in the near future.
This path should be writable by the process permissions set for Gunicorn
workers. If not specified, Gunicorn will choose a system generated
temporary directory.
"""
class SecureSchemeHeader(Setting):
name = "secure_scheme_headers"
section = "Server Mechanics"
validator = validate_dict
default = {
"X-FORWARDED-PROTOCOL": "ssl",
"X-FORWARDED-PROTO": "https",
"X-FORWARDED-SSL": "on"
}
desc = """\
A dictionary containing headers and values that the front-end proxy
uses to indicate HTTPS requests. These tell gunicorn to set
wsgi.url_scheme to "https", so your application can tell that the
request is secure.
The dictionary should map upper-case header names to exact string
values. The value comparisons are case-sensitive, unlike the header
names, so make sure they're exactly what your front-end proxy sends
when handling HTTPS requests.
It is important that your front-end proxy configuration ensures that
the headers defined here can not be passed directly from the client.
"""
class XForwardedFor(Setting):
name = "x_forwarded_for_header"
section = "Server Mechanics"
meta = "STRING"
validator = validate_string
default = 'X-FORWARDED-FOR'
desc = """\
    Set the X-Forwarded-For header that identifies the originating IP
    address of the client connecting to gunicorn through a proxy.
"""
class ForwardedAllowIPS(Setting):
name = "forwarded_allow_ips"
section = "Server Mechanics"
meta = "STRING"
validator = validate_string_to_list
default = "127.0.0.1"
desc = """\
    Front-end IPs from which X-Forwarded-* headers are trusted
    (comma-separated).
Set to "*" to disable checking of Front-end IPs (useful for setups
where you don't know in advance the IP address of Front-end, but
you still trust the environment)
"""
class AccessLog(Setting):
name = "accesslog"
section = "Logging"
cli = ["--access-logfile"]
meta = "FILE"
validator = validate_string
default = None
desc = """\
The Access log file to write to.
"-" means log to stderr.
"""
class AccessLogFormat(Setting):
name = "access_log_format"
section = "Logging"
cli = ["--access-logformat"]
meta = "STRING"
validator = validate_string
default = '%(h)s %(l)s %(u)s %(t)s "%(r)s" %(s)s %(b)s "%(f)s" "%(a)s"'
desc = """\
    The access log format.
By default:
%(h)s %(l)s %(u)s %(t)s "%(r)s" %(s)s %(b)s "%(f)s" "%(a)s"
h: remote address
l: '-'
u: currently '-', may be user name in future releases
t: date of the request
r: status line (ex: GET / HTTP/1.1)
s: status
b: response length or '-'
f: referer
a: user agent
T: request time in seconds
D: request time in microseconds,
p: process ID
{Header}i: request header
{Header}o: response header
"""
class ErrorLog(Setting):
name = "errorlog"
section = "Logging"
cli = ["--error-logfile", "--log-file"]
meta = "FILE"
validator = validate_string
default = "-"
desc = """\
The Error log file to write to.
"-" means log to stderr.
"""
class Loglevel(Setting):
name = "loglevel"
section = "Logging"
cli = ["--log-level"]
meta = "LEVEL"
validator = validate_string
default = "info"
desc = """\
The granularity of Error log outputs.
Valid level names are:
* debug
* info
* warning
* error
* critical
"""
class LoggerClass(Setting):
name = "logger_class"
section = "Logging"
cli = ["--logger-class"]
meta = "STRING"
validator = validate_class
default = "simple"
desc = """\
The logger you want to use to log events in gunicorn.
    The default class (``gunicorn.glogging.Logger``) handles most
    normal usages in logging. It provides error and access logging.
    You can provide your own logger by giving gunicorn a
python path to a subclass like gunicorn.glogging.Logger.
Alternatively the syntax can also load the Logger class
with `egg:gunicorn#simple`
"""
class LogConfig(Setting):
name = "logconfig"
section = "Logging"
cli = ["--log-config"]
meta = "FILE"
validator = validate_string
default = None
desc = """\
The log config file to use.
Gunicorn uses the standard Python logging module's Configuration
file format.
"""
class SyslogTo(Setting):
name = "syslog_addr"
section = "Logging"
cli = ["--log-syslog-to"]
meta = "SYSLOG_ADDR"
validator = validate_string
if PLATFORM == "darwin":
default = "unix:///var/run/syslog"
elif PLATFORM in ('freebsd', 'dragonfly', ):
default = "unix:///var/run/log"
elif PLATFORM == "openbsd":
default = "unix:///dev/log"
else:
default = "udp://localhost:514"
desc = """\
    Address to send syslog messages to.
"""
class Syslog(Setting):
name = "syslog"
section = "Logging"
cli = ["--log-syslog"]
validator = validate_bool
action = 'store_true'
default = False
desc = """\
Log to syslog.
"""
class SyslogPrefix(Setting):
name = "syslog_prefix"
section = "Logging"
cli = ["--log-syslog-prefix"]
meta = "SYSLOG_PREFIX"
validator = validate_string
default = None
desc = """\
    Makes Gunicorn use the parameter as the program name in the syslog
    entries. All entries will be prefixed by gunicorn.<prefix>. By default
    the program name is the name of the process.
"""
class SyslogFacility(Setting):
name = "syslog_facility"
section = "Logging"
cli = ["--log-syslog-facility"]
meta = "SYSLOG_FACILITY"
validator = validate_string
default = "user"
desc = """\
Syslog facility name
"""
class EnableStdioInheritance(Setting):
name = "enable_stdio_inheritance"
section = "Logging"
cli = ["-R", "--enable-stdio-inheritance"]
validator = validate_bool
default = False
action = "store_true"
desc = """\
Enable stdio inheritance
Enable inheritance for stdio file descriptors in daemon mode.
    Note: To disable the Python stdout buffering, you can set the
    environment variable ``PYTHONUNBUFFERED``.
"""
class Procname(Setting):
name = "proc_name"
section = "Process Naming"
cli = ["-n", "--name"]
meta = "STRING"
validator = validate_string
default = None
desc = """\
A base to use with setproctitle for process naming.
This affects things like ``ps`` and ``top``. If you're going to be
running more than one instance of Gunicorn you'll probably want to set a
name to tell them apart. This requires that you install the setproctitle
module.
It defaults to 'gunicorn'.
"""
class DefaultProcName(Setting):
name = "default_proc_name"
section = "Process Naming"
validator = validate_string
default = "gunicorn"
desc = """\
Internal setting that is adjusted for each type of application.
"""
class DjangoSettings(Setting):
name = "django_settings"
section = "Django"
cli = ["--settings"]
meta = "STRING"
validator = validate_string
default = None
desc = """\
The Python path to a Django settings module. (deprecated)
e.g. 'myproject.settings.main'. If this isn't provided, the
DJANGO_SETTINGS_MODULE environment variable will be used.
**DEPRECATED**: use the --env argument instead.
"""
class PythonPath(Setting):
name = "pythonpath"
section = "Server Mechanics"
cli = ["--pythonpath"]
meta = "STRING"
validator = validate_string
default = None
desc = """\
A directory to add to the Python path.
e.g.
'/home/djangoprojects/myproject'.
"""
class Paste(Setting):
name = "paste"
section = "Server Mechanics"
cli = ["--paster"]
meta = "STRING"
validator = validate_string
default = None
desc = """\
Load a paste.deploy config file.
"""
class OnStarting(Setting):
name = "on_starting"
section = "Server Hooks"
validator = validate_callable(1)
type = six.callable
def on_starting(server):
pass
default = staticmethod(on_starting)
desc = """\
Called just before the master process is initialized.
The callable needs to accept a single instance variable for the Arbiter.
"""
class OnReload(Setting):
name = "on_reload"
section = "Server Hooks"
validator = validate_callable(1)
type = six.callable
def on_reload(server):
pass
default = staticmethod(on_reload)
desc = """\
Called to recycle workers during a reload via SIGHUP.
The callable needs to accept a single instance variable for the Arbiter.
"""
class WhenReady(Setting):
name = "when_ready"
section = "Server Hooks"
validator = validate_callable(1)
type = six.callable
def when_ready(server):
pass
default = staticmethod(when_ready)
desc = """\
Called just after the server is started.
The callable needs to accept a single instance variable for the Arbiter.
"""
class Prefork(Setting):
name = "pre_fork"
section = "Server Hooks"
validator = validate_callable(2)
type = six.callable
def pre_fork(server, worker):
pass
default = staticmethod(pre_fork)
desc = """\
Called just before a worker is forked.
The callable needs to accept two instance variables for the Arbiter and
new Worker.
"""
class Postfork(Setting):
name = "post_fork"
section = "Server Hooks"
validator = validate_callable(2)
type = six.callable
def post_fork(server, worker):
pass
default = staticmethod(post_fork)
desc = """\
Called just after a worker has been forked.
The callable needs to accept two instance variables for the Arbiter and
new Worker.
"""
class PostWorkerInit(Setting):
name = "post_worker_init"
section = "Server Hooks"
validator = validate_callable(1)
type = six.callable
def post_worker_init(worker):
pass
default = staticmethod(post_worker_init)
desc = """\
Called just after a worker has initialized the application.
The callable needs to accept one instance variable for the initialized
Worker.
"""
class PreExec(Setting):
name = "pre_exec"
section = "Server Hooks"
validator = validate_callable(1)
type = six.callable
def pre_exec(server):
pass
default = staticmethod(pre_exec)
desc = """\
Called just before a new master process is forked.
The callable needs to accept a single instance variable for the Arbiter.
"""
class PreRequest(Setting):
name = "pre_request"
section = "Server Hooks"
validator = validate_callable(2)
type = six.callable
def pre_request(worker, req):
worker.log.debug("%s %s" % (req.method, req.path))
default = staticmethod(pre_request)
desc = """\
Called just before a worker processes the request.
The callable needs to accept two instance variables for the Worker and
the Request.
"""
class PostRequest(Setting):
name = "post_request"
section = "Server Hooks"
validator = validate_post_request
type = six.callable
def post_request(worker, req, environ, resp):
pass
default = staticmethod(post_request)
desc = """\
Called after a worker processes the request.
    The callable needs to accept the Worker, the Request, the WSGI environ
    and the Response; callables taking only the first two or three of these
    are adapted automatically (see validate_post_request).
"""
class WorkerExit(Setting):
name = "worker_exit"
section = "Server Hooks"
validator = validate_callable(2)
type = six.callable
def worker_exit(server, worker):
pass
default = staticmethod(worker_exit)
desc = """\
    Called just after a worker has exited.
The callable needs to accept two instance variables for the Arbiter and
the just-exited Worker.
"""
class NumWorkersChanged(Setting):
name = "nworkers_changed"
section = "Server Hooks"
validator = validate_callable(3)
type = six.callable
def nworkers_changed(server, new_value, old_value):
pass
default = staticmethod(nworkers_changed)
desc = """\
Called just after num_workers has been changed.
    The callable needs to accept the Arbiter instance and two integers:
    the number of workers after and before the change.
If the number of workers is set for the first time, old_value would be
None.
"""
class ProxyProtocol(Setting):
name = "proxy_protocol"
section = "Server Mechanics"
cli = ["--proxy-protocol"]
validator = validate_bool
default = False
action = "store_true"
desc = """\
    Enable detection of the PROXY protocol (PROXY mode).
    Allows using HTTP and the PROXY protocol together. This may be useful
    when working with stunnel as an HTTPS frontend and gunicorn as an HTTP
    server.
PROXY protocol: http://haproxy.1wt.eu/download/1.5/doc/proxy-protocol.txt
Example for stunnel config::
[https]
protocol = proxy
accept = 443
connect = 80
cert = /etc/ssl/certs/stunnel.pem
key = /etc/ssl/certs/stunnel.key
"""
class ProxyAllowFrom(Setting):
name = "proxy_allow_ips"
section = "Server Mechanics"
cli = ["--proxy-allow-from"]
validator = validate_string_to_list
default = "127.0.0.1"
desc = """\
    Front-end IPs from which proxy requests are accepted (comma-separated).
"""
class KeyFile(Setting):
name = "keyfile"
section = "Ssl"
cli = ["--keyfile"]
meta = "FILE"
validator = validate_string
default = None
desc = """\
SSL key file
"""
class CertFile(Setting):
name = "certfile"
section = "Ssl"
cli = ["--certfile"]
meta = "FILE"
validator = validate_string
default = None
desc = """\
SSL certificate file
"""
|
ammaraskar/gunicorn
|
gunicorn/config.py
|
Python
|
mit
| 38,001
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import time
import tornado.web
from tornado import gen
from bson.objectid import ObjectId
from . import BaseHandler
from .utils import make_content
class UploadHandler(BaseHandler):
@tornado.web.authenticated
def get(self):
self.render('tool/upload_img.html')
@tornado.web.authenticated
def post(self):
try:
file_metas = self.request.files['img_file'][0]
        except (KeyError, IndexError):
            self.send_message('The image got lost along the way, please try again')
self.render('account/upload.html')
return
        with open(file_metas['filename'], 'wb') as f:
f.write(file_metas['body'])
url = self.upload_img(file_metas['filename'], file_metas['filename'])
if not url:
os.remove(file_metas['filename'])
            self.send_message('Image upload failed')
self.render('account/upload.html')
return
os.remove(file_metas['filename'])
        self.send_message('Image uploaded successfully. Your image URL is: %s  Copy the URL into a topic to insert the image' % url, type='success')
self.render('tool/upload.html')
class NoteListHandler(BaseHandler):
@tornado.web.authenticated
def get(self):
p = int(self.get_argument('p', 1))
notes = self.db.notes.find({'author': self.current_user['name']},
sort=[('created', -1)])
notes_count = notes.count()
per_page = 10
notes = notes[(p - 1) * per_page:p * per_page]
self.render('tool/note_list.html', notes=notes, notes_count=notes_count, p=p)
class NoteHandler(BaseHandler):
@tornado.web.authenticated
def get(self, note_id):
note = self.get_note(note_id)
self.render('tool/note.html', note=note)
class NewNoteHandler(BaseHandler):
@tornado.web.authenticated
def get(self):
self.render('tool/note_new.html')
@tornado.web.authenticated
@gen.coroutine
def post(self):
title = self.get_escaped_argument('title', None)
content = self.get_escaped_argument('content', None)
if not (title and content):
            self.send_message('Please fill in all the fields')
if self.messages:
self.render('tool/note_new.html')
return
note = yield self.async_db.notes.find_one({
'title': title,
'content': content,
'author': self.current_user['name']
})
if note:
            self.send_message('Do not post duplicate content!')
self.redirect('/tool/note/%s' % note['_id'])
return
time_now = time.time()
content_html = make_content(content)
data = {
'title': title,
'content': content,
'content_html': content_html,
'author': self.current_user['name'],
'created': time_now,
'modify': None,
}
yield self.async_db.notes.insert(data)
self.redirect('/tool/note')
class EditNoteHandler(BaseHandler):
@tornado.web.authenticated
def get(self, note_id):
note = self.get_note(note_id)
self.render('tool/note_edit.html', note=note)
@tornado.web.authenticated
@gen.coroutine
def post(self, note_id):
note = self.get_note(note_id)
if not self.current_user['name'] == note['author']:
            self.send_message('Permission denied!')
self.redirect('/tool/note')
return
title = self.get_escaped_argument('title', None)
content = self.get_escaped_argument('content', None)
if not (title and content):
            self.send_message('Please fill in all the fields')
if self.messages:
self.render('tool/note_edit.html', note=note)
return
time_now = time.time()
content_html = make_content(content)
note['title'] = title
note['content'] = content
note['content_html'] = content_html
note['modify'] = time_now
yield self.async_db.notes.save(note)
        self.send_message('Edit successful!', type='success')
self.redirect('/tool/note/%s' % note_id)
class DelNoteHandler(BaseHandler):
@tornado.web.authenticated
@gen.coroutine
def get(self, note_id):
note = self.get_note(note_id)
if not self.current_user['name'] == note['author']:
            self.send_message('Permission denied!')
self.redirect('/tool/note')
return
note_id = ObjectId(note_id)
yield self.async_db.notes.remove({'_id': note_id})
        self.send_message('Delete successful', type='success')
self.redirect('/tool/note')
handlers = [
(r'/tool/upload', UploadHandler),
(r'/tool/note', NoteListHandler),
(r'/tool/note/new', NewNoteHandler),
(r'/tool/note/(\w+)', NoteHandler),
(r'/tool/note/(\w+)/edit', EditNoteHandler),
(r'/tool/note/(\w+)/delete', DelNoteHandler),
]
|
JmPotato/College
|
handlers/tool.py
|
Python
|
mit
| 5,001
|
#
# This file is part of LiteSPI
#
# Copyright (c) 2020 Antmicro <www.antmicro.com>
# SPDX-License-Identifier: BSD-2-Clause
from migen import *
from migen.genlib.cdc import MultiReg
from migen.genlib.misc import WaitTimer
from litespi.common import *
from litespi.clkgen import DDRLiteSPIClkGen
from litex.soc.interconnect import stream
from litex.soc.interconnect.csr import *
from litex.build.io import DDRTristate
from litex.soc.integration.doc import AutoDoc
# LiteSPI DDR PHY Core -----------------------------------------------------------------------------
class LiteSPIDDRPHYCore(Module, AutoCSR, AutoDoc):
"""LiteSPI PHY DDR instantiator
    The ``LiteSPIDDRPHYCore`` class provides a generic PHY that can be connected to the ``LiteSPICore``.
It supports single/dual/quad/octal output reads from the flash chips.
You can use this class only with devices that supports the DDR primitives.
The following diagram shows how each clock configuration option relates to outputs and input sampling in DDR mode:
.. wavedrom:: ../../doc/ddr-timing-diagram.json
Parameters
----------
pads : Object
SPI pads description.
flash : SpiNorFlashModule
SpiNorFlashModule configuration object.
Attributes
----------
source : Endpoint(spi_phy2core_layout), out
Data stream.
sink : Endpoint(spi_core2phy_layout), in
Control stream.
cs : Signal(), in
Flash CS signal.
"""
def __init__(self, pads, flash, cs_delay, extra_latency=0):
self.source = source = stream.Endpoint(spi_phy2core_layout)
self.sink = sink = stream.Endpoint(spi_core2phy_layout)
self.cs = Signal()
if hasattr(pads, "miso"):
bus_width = 1
pads.dq = [pads.mosi, pads.miso]
else:
bus_width = len(pads.dq)
assert bus_width in [1, 2, 4, 8]
# Check if number of pads matches configured mode.
assert flash.check_bus_width(bus_width)
self.addr_bits = addr_bits = flash.addr_bits
self.ddr = ddr = flash.ddr
assert not ddr
# Clock Generator.
self.submodules.clkgen = clkgen = DDRLiteSPIClkGen(pads)
# CS control.
cs_timer = WaitTimer(cs_delay + 1) # Ensure cs_delay cycles between XFers.
cs_enable = Signal()
self.submodules += cs_timer
self.comb += cs_timer.wait.eq(self.cs)
self.comb += cs_enable.eq(cs_timer.done)
self.comb += pads.cs_n.eq(~cs_enable)
# I/Os.
data_bits = 32
dq_o = Array([Signal(len(pads.dq)) for _ in range(2)])
dq_i = Array([Signal(len(pads.dq)) for _ in range(2)])
dq_oe = Array([Signal(len(pads.dq)) for _ in range(2)])
for i in range(len(pads.dq)):
self.specials += DDRTristate(
io = pads.dq[i],
o1 = dq_o[0][i], o2 = dq_o[1][i],
oe1 = dq_oe[0][i], oe2 = dq_oe[1][i],
i1 = dq_i[0][i], i2 = dq_i[1][i]
)
# Data Shift Registers.
sr_cnt = Signal(8, reset_less=True)
sr_out_load = Signal()
sr_out_shift = Signal()
sr_out = Signal(len(sink.data), reset_less=True)
sr_in_shift = Signal()
sr_in = Signal(len(sink.data), reset_less=True)
# Data Out Shift.
self.comb += [
dq_oe[1].eq(sink.mask),
Case(sink.width, {
1: dq_o[1].eq(sr_out[-1:]),
2: dq_o[1].eq(sr_out[-2:]),
4: dq_o[1].eq(sr_out[-4:]),
8: dq_o[1].eq(sr_out[-8:]),
})
]
self.sync += If(sr_out_load,
sr_out.eq(sink.data << (len(sink.data) - sink.len))
)
self.sync += If(sr_out_shift,
dq_oe[0].eq(dq_oe[1]),
dq_o[0].eq(dq_o[1]),
Case(sink.width, {
1 : sr_out.eq(Cat(Signal(1), sr_out)),
2 : sr_out.eq(Cat(Signal(2), sr_out)),
4 : sr_out.eq(Cat(Signal(4), sr_out)),
8 : sr_out.eq(Cat(Signal(8), sr_out)),
})
)
# Data In Shift.
self.sync += If(sr_in_shift,
Case(sink.width, {
1 : sr_in.eq(Cat(dq_i[0][1], sr_in)), # 1: pads.miso
2 : sr_in.eq(Cat(dq_i[0][:2], sr_in)),
4 : sr_in.eq(Cat(dq_i[0][:4], sr_in)),
8 : sr_in.eq(Cat(dq_i[0][:8], sr_in)),
})
)
# FSM.
self.submodules.fsm = fsm = FSM(reset_state="WAIT-CMD-DATA")
fsm.act("WAIT-CMD-DATA",
# Stop Clk.
NextValue(clkgen.en, 0),
# Wait for CS and a CMD from the Core.
If(cs_enable & sink.valid,
# Load Shift Register Count/Data Out.
NextValue(sr_cnt, sink.len - sink.width),
sr_out_load.eq(1),
# Start XFER.
NextState("XFER")
)
)
fsm.act("XFER",
# Generate Clk.
NextValue(clkgen.en, 1),
# Data In Shift.
sr_in_shift.eq(1),
# Data Out Shift.
sr_out_shift.eq(1),
# Shift Register Count Update/Check.
NextValue(sr_cnt, sr_cnt - sink.width),
# End XFer.
If(sr_cnt == 0,
NextValue(sr_cnt, (2 + 2*extra_latency)*sink.width), # FIXME: Explain magic numbers.
NextState("XFER-END"),
),
)
fsm.act("XFER-END",
# Stop Clk.
NextValue(clkgen.en, 0),
# Data In Shift.
sr_in_shift.eq(1),
# Shift Register Count Update/Check.
NextValue(sr_cnt, sr_cnt - sink.width),
If(sr_cnt == 0,
sink.ready.eq(1),
NextState("SEND-STATUS-DATA"),
),
)
self.comb += source.data.eq(sr_in)
fsm.act("SEND-STATUS-DATA",
# Send Data In to Core and return to WAIT when accepted.
source.valid.eq(1),
source.last.eq(1),
If(source.ready,
NextState("WAIT-CMD-DATA"),
)
)
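# Example instantiation (a sketch; the pads request name and the flash
# module/opcode are assumptions, not defined in this file):
#
#   from litespi.modules import S25FL128L
#   from litespi.opcodes import SpiNorFlashOpCodes as Codes
#
#   pads  = platform.request("spiflash4x")
#   flash = S25FL128L(Codes.READ_1_1_4)
#   phy   = LiteSPIDDRPHYCore(pads, flash, cs_delay=10)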
|
litex-hub/litespi
|
litespi/phy/generic_ddr.py
|
Python
|
bsd-2-clause
| 6,281
|
# -*- coding: UTF-8 -*-
'''
Created on 27.01.2012
@author: rack
'''
import logging
import random
from datetime import date
from interaction.irc.module import InteractiveModule, InteractiveModuleCommand, InteractiveModuleResponse
from components.topic import TopicNotFound, AdditionNotFound, NoAdditionAvailable, NoAffectedRows
#-------------------------------------------------------------------------------
# Constants
#-------------------------------------------------------------------------------
BOT_IS_OPERATOR = 2
RANDOM_YEAR_START = 1983
RANDOM_YEAR_END = 2020
DEFAULT_TOPIC = 'Welcome to the gathering place for the socially disadvantaged'
#-------------------------------------------------------------------------------
# Module 'Logic'
#-------------------------------------------------------------------------------
class Topic(InteractiveModule):
"""
This module provides topic functions
"""
def initialize(self):
"""
Initialize the module.
"""
self.me = self.client.me
self.logger = logging.getLogger('interaction.irc.topic')
self.component = self.client.bot.get_subsystem('topic-component')
def module_identifier(self):
"""
Declare the module identifier.
"""
return 'TopicMod'
def init_commands(self):
return [
InteractiveModuleCommand(
keyword='topic',
callback=self.display_current_topic
),
InteractiveModuleCommand(
keyword='settopic',
callback=self.set_new_topic,
pattern=r'^(.+)$',
syntaxhint='<new topic>'
),
InteractiveModuleCommand(
keyword='addtopic',
callback=self.add_new_addition,
pattern=r'^(.+)$',
syntaxhint='<addition>'
),
InteractiveModuleCommand(
keyword='deltopic',
callback=self.del_addition,
pattern=r'^(.+)$',
syntaxhint=r'<id>'
),
InteractiveModuleCommand(
keyword='listtopic',
callback=self.display_topic_additions
)
]
def display_current_topic(self, request):
"""
Display the current topic.
Usage: .topic
@return InteractiveModuleResponse
"""
response = InteractiveModuleResponse()
try:
topic = self.component.get_last_topic()
topic_string = self.component.create_topic_string(topic.text,topic.addition.text,topic.year)
response.add_line('{0} set by {1}'.format(topic_string,topic.user))
except TopicNotFound:
response.add_line("No topic available.")
return response
def set_new_topic(self, request):
"""
Change the topic of a channel.
Usage: .settopic <text|'reset'>
If the module receive the string 'reset', it will set the default topic
@param request: A runtime request of an InteractiveModule command.
@return InteractiveModuleResponse
"""
response = InteractiveModuleResponse()
        #Normally, here I would check whether the mode +t is set in the
        #channel modes because if it is set, I won't need to check the
        #userMode. But get_modes() is not implemented, yet :(
#channelModes = channelObject.get_modes()
channel_object = self.usermgmt.chanlist.get(request.target)
userMode = channel_object.get_user_mode(self.me.source.nickname)
try:
if (userMode == BOT_IS_OPERATOR):
text = request.parameter[0]
if (text == 'reset'): #default topic
text = DEFAULT_TOPIC
addition = self.component.get_random_addition().text
year = random.randint(RANDOM_YEAR_START,RANDOM_YEAR_END)
topic_cmd = self.client.get_command('Topic').get_sender()
topic_cmd.channel = request.target
topic_cmd.topic = self.create_topic_string(text,addition,year)
topic_cmd.send()
self.component.insert_topic(text,addition,year,request.source.nickname)
else:
response.add_line("Bot needs to be an operator to do this.")
except NoAdditionAvailable:
response.add_line("There are no topic additions available at the moment.")
return response
def add_new_addition(self, request):
"""
        Insert a new addition into the database
        Usage: .addtopic <addition(text)>
@param request: A runtime request of an InteractiveModule command.
@return InteractiveModuleResponse
"""
response = InteractiveModuleResponse()
self.component.insert_addition(request.parameter[0],request.source.nickname)
response.add_line("The process was successful.")
return response
def del_addition(self, request):
"""
        Delete an addition with the given id.
        Usage: .deltopic <id>
@param request: A runtime request of an InteractiveModule command.
@return: InteractiveModuleResponse
"""
response = InteractiveModuleResponse()
try:
id = int(request.parameter[0])
self.component.delete_addition_by_id(id)
response.add_line("Delete was successful.")
except NoAffectedRows:
response.add_line("No entry was deleted.")
except ValueError:
response.add_line("Please enter a valid ID!")
return response
def display_topic_additions(self, request):
"""
Send a link to a list with all additions.
Usage: .listtopic
@return: InteractiveModuleResponse
"""
return InteractiveModuleResponse("www.derlinkfehltnoch.de")
def create_topic_string(self,text,addition,year):
"""
        Return a formatted topic string with text, addition and year
@param text: a topic text
@param addition: an addition text
@param year: a year for the second addition part
@return: formated string
"""
if (year <= date.today().year):
since_until = "since"
else:
since_until = "until"
return "127,1.:. Welcome� 7,1� 14,1F4,1=15,1O7,1=0,1P" \
+ "7,1=0,1T7,1=0,1I7,1=15O4,1=14,1N 7,1�" \
+ " 7,14Topic: {0} 47,1� {1} {2} {3}! .:.".format(text, addition, since_until, year)
|
msteinhoff/foption-bot
|
src/python/interaction/irc/modules/topic.py
|
Python
|
mit
| 7,648
|
# https://oj.leetcode.com/problems/gas-station/
class Solution:
# @param gas, a list of integers
# @param cost, a list of integers
# @return an integer
def canCompleteCircuit(self, gas, cost):
n = len(gas)
if n == 0:
return -1
        startIndex, total = 0, 0
        # Walk the circuit up to twice: the second lap only verifies that the
        # current candidate start index can wrap all the way around.
        for i in xrange(2*n):
            j = i % n
            # Stop once the candidate start has been revisited on the wrap.
            if startIndex != i and startIndex == j:
                break
            total += gas[j] - cost[j]
            if total < 0:
                if i < n - 1:
                    # Ran out of gas: no start in [startIndex, i] can work,
                    # so restart the scan from the next station.
                    startIndex = i + 1
                    total = 0
                else:
                    # Ran dry at or past the end of the first lap: no valid
                    # start exists.
                    startIndex = -1
                    break
        return startIndex
s = Solution()
print s.canCompleteCircuit([1,2], [2,1])
print s.canCompleteCircuit([4], [5])
print s.canCompleteCircuit([5], [4])
|
yaoxuanw007/forfun
|
leetcode/python/gasStation.py
|
Python
|
mit
| 714
|
from pyqtgraph.Qt import QtGui, QtCore
from flow import *
import numpy as np
import pyo
from traits.api import Int
class BinauralBeat(Block):
volume = Input()
def __init__(self, **config):
super(BinauralBeat, self).__init__(**config)
self.server = pyo.Server(buffersize=1024).boot()
centerFreq = pyo.Sig(256)
binauralFreq = pyo.Sine(freq=0.05, add=13.5, mul=1.5)
left = pyo.Sine(freq=centerFreq - binauralFreq / 2)
right = pyo.Sine(freq=centerFreq + binauralFreq / 2)
left.out(chnl=0)
right.out(chnl=1)
#left = pyo.PinkNoise().mix(2).out()
import thread
thread.start_new_thread(self.server.start, ())
self.left = left
self.right = right
def process(self):
vol = float(self.volume.buffer[-1]) / 10
vol = min(vol, 1.0)
self.left.mul = self.right.mul = vol
class SMRFlow(object):
def init(self, context):
C3C4 = context.get_channel('Channel 1', color='red', label='Raw with 50/60 Hz Noise')
ch1 = DCBlock(C3C4).ac
#ch1 = NotchFilter(ch1)
#ch1 = BandPass(0.0, 35.0, input=ch1)
ch1 = DCBlock(ch1).ac
self.OSC1 = Oscilloscope('Raw Signal', channels=[ch1])
SMR = BandPass(11.5, 14.5, input=ch1, color='yellow')
Theta = BandPass(2, 6, input=ch1, order=6, color='orange')
hibeta = BandPass(23, 32, input=ch1, order=6, color='cyan')
SMR = Expression(lambda x: x*25, SMR)
SMR.output.color = 'yellow'
Theta.output.color='orange'
hibeta.output.color='cyan'
SMR_rms = RMS(SMR)
Theta_rms = RMS(Theta)
hibeta_rms = RMS(hibeta)
rms_channels = [SMR_rms, Theta_rms, hibeta_rms]
self.OSC2 = Oscilloscope('intensities', channels=rms_channels)
score = Expression(lambda L, T, H: 1 * (L - T /4 - H/2) / np.average([L, T, H]), SMR_rms, Theta_rms, hibeta_rms)
#score = Expression(lambda L, T, H: L - T /4 - H/2, SMR_rms, Theta_rms, hibeta_rms)
self.Lthr = Threshold('SMR', input=SMR_rms, mode='increase', auto_target=90)
self.Tthr = Threshold('Theta', input=Theta_rms, mode='decrease', auto_target=93)
self.Hthr = Threshold('Hi Beta', input=hibeta_rms, mode='decrease', auto_target=92)
enable = Expression(lambda *x: all(x), self.Lthr.passfail, self.Tthr.passfail, self.Hthr.passfail)
score = Expression(lambda x: 1.0 if x else 0.0, enable)
self.OSC3 = Oscilloscope('SMR Trendline', channels=[score], autoscale=False)
self.Spec = BarSpectrogram('Spectrogram', input=ch1, hi=125, yrange=10000)
import os
if 'MOVIE' in os.environ:
self.mplayer = MPlayerControl(os.environ['MOVIE'], enable=enable)
else:
self.bb = BinauralBeat(volume=score)
def widget(self):
w = QtGui.QWidget()
layout = QtGui.QGridLayout()
w.setLayout(layout)
layout.addWidget(self.OSC1.widget(), 0, 0)
layout.addWidget(self.OSC2.widget(), 1, 0)
layout.addWidget(self.OSC3.widget(), 2, 0)
layout.addWidget(self.Spec.widget(), 0, 1, 3, 3)
layout.addWidget(self.Lthr.widget(), 4, 1, 1, 1)
layout.addWidget(self.Tthr.widget(), 4, 2, 1, 1)
layout.addWidget(self.Hthr.widget(), 4, 3, 1, 1)
return w
def flow():
return SMRFlow()
|
strfry/OpenNFB
|
protocols/clear-windshield.py
|
Python
|
gpl-3.0
| 3,466
|
import OOMP
newPart = OOMP.oompItem(9178)
newPart.addTag("oompType", "OPAM")
newPart.addTag("oompSize", "MS08")
newPart.addTag("oompColor", "X")
newPart.addTag("oompDesc", "KLMV358")
newPart.addTag("oompIndex", "01")
OOMP.parts.append(newPart)
|
oomlout/oomlout-OOMP
|
old/OOMPpart_OPAM_MS08_X_KLMV358_01.py
|
Python
|
cc0-1.0
| 246
|
"""Test that minimum and maximum window size can be set.
Expected behaviour:
One window will be opened. The window's dimensions will be printed
to the terminal. Initially the window has no minimum or maximum
size (besides any OS-enforced limit).
- press "n" to set the minimum size to be the current size.
- press "x" to set the maximum size to be the current size.
You should see a green border inside the window but no red.
Close the window or press ESC to end the test.
"""
import unittest
from pyglet import window
from pyglet.window import key
from tests.interactive.window import window_util
class WINDOW_SET_MIN_MAX_SIZE(unittest.TestCase):
def on_resize(self, width, height):
print('Window size is %dx%d.' % (width, height))
self.width, self.height = width, height
def on_key_press(self, symbol, modifiers):
if symbol == key.N:
self.w.set_minimum_size(self.width, self.height)
print('Minimum size set to %dx%d.' % (self.width, self.height))
elif symbol == key.X:
self.w.set_maximum_size(self.width, self.height)
print('Maximum size set to %dx%d.' % (self.width, self.height))
def test_min_max_size(self):
print(__doc__)
self.width, self.height = 200, 200
self.w = w = window.Window(self.width, self.height, resizable=True)
w.push_handlers(self)
while not w.has_exit:
w.dispatch_events()
window_util.draw_client_border(w)
w.flip()
w.close()
|
bitcraft/pyglet
|
tests/interactive/window/window_set_min_max_size.py
|
Python
|
bsd-3-clause
| 1,569
|
#!/usr/bin/env python
###########################################################################
#
# Copyright 2017 Samsung Electronics All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
#
###########################################################################
# File : gui_ramdumpParser.py
# Description:
# graphical user interface of ramdumpParser.
# Real parsing is operated at ramdumpParser.py
from __future__ import print_function
from tkinter import *
import tkinter.ttk
import tkinter.filedialog
import os
import tempfile
import subprocess
modes = (
("AssertLog",1),
("AssertLogFile",2),
("Ramdump",3),
("CallStackLog",4),
)
g_elfpath = "../../build/output/bin/tinyara"
file_data = 'HeapInfo'
class PathFrame(Frame):
def __init__(self, parent, labelname="path", path=None):
Frame.__init__(self, parent)
self.path = StringVar()
self.path.set(path)
self.labelname = labelname
self.initialize()
def initialize(self):
self.label = Label(self, text=self.labelname)
self.label.grid(column=0, row=0, sticky="EW")
self.entry = Entry(self, textvariable=self.path)
self.entry.grid(column=1, row=0, sticky="EW")
self.entry.bind("<Return>", self.OnPressEnter)
btn = Button(self, text="Browse", command=self.OnButtonClick)
btn.grid(column=2, row=0)
def OnButtonClick(self):
temp = tkinter.filedialog.askopenfilename(parent=self)
if len(temp) > 0:
self.path.set(temp)
def OnPressEnter(self,event):
self.path.set(self.entry.get())
class curry:
def __init__(self, func, *args, **kwargs):
self.func = func
self.pending = args[:]
self.kwargs=kwargs.copy()
def __call__(self, *args, **kwargs):
if kwargs and self.kwargs:
kw=self.kwargs.copy()
kw.update(kwargs)
else:
kw=kwargs or self.kwargs
return self.func(*(self.pending+args), **kw)
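# Example (a sketch): curry pre-binds positional arguments, much like
# functools.partial:
#   add_one = curry(lambda a, b: a + b, 1)
#   add_one(2)    # -> 3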
class HeapInfo(Tk):
def __init__(self):
Tk.__init__(self)
self.wm_title("Heap Information")
WIDTH_SIZE = 180
MIN_SIZE = 100
frame = Frame(self, relief="solid", bd = 0, width = WIDTH_SIZE)
frame.pack(fill = "both")
self.heap_label = Label(frame, justify = 'left',anchor = 'nw', bg = 'white', padx = 1, bd = 1 , width = 50 , height = 4)
self.heap_label.pack(side = 'left')
frame = Frame(self, relief="solid", bd = 0, width = WIDTH_SIZE)
frame.pack(fill = "both")
label = Label(frame, text ='FREE', bg = "white", padx = 1, bd = 1 , width = 10)
label.pack(side = 'right')
label = Label(frame, text ='STACK', bg = "red", padx = 1, bd = 1 , width = 10)
label.pack(side = 'right')
label = Label(frame, text ='ALLOC', bg = "blue", padx = 1, bd = 1 , width = 10)
label.pack(side = 'right')
len = 0
remain = 0
frame = Frame(self, relief="solid", bd = 1, width = WIDTH_SIZE)
frame.pack(fill = "both")
with open(file_data) as f:
for line in f:
lines = line.split()
                size = (int(lines[0]) + MIN_SIZE - 1) // MIN_SIZE  # ceiling division; '/' would yield a float on Python 3
len += size + 1
if len > WIDTH_SIZE :
remain = len - WIDTH_SIZE
size -= remain
len = 0
separator = tkinter.ttk.Separator(frame, orient="vertical")
separator.pack(side = 'left')
                # lines[0] = heap size, lines[1] = heap status, lines[2] = Mem address, lines[3] = pid, lines[4] = Owner
if lines[1] == '0': # alloc
alloc_button = Button(frame, bg = "blue", padx = 1, bd = 0, width = size, command = curry(self.alloc_event, lines[0], lines[2], lines[3], lines[4]))
alloc_button.pack(side = 'left')
elif lines[1] == '1': # stack
stack_button = Button(frame, bg = "red", padx = 1, bd = 0, width = size, command = curry(self.alloc_event, lines[0], lines[2], lines[3], lines[4]))
stack_button.pack(side = 'left')
else : # free
free_button = Button(frame, bg = "white", padx = 1, bd = 0, width = size, command = curry(self.free_event, lines[0], lines[2]))
free_button.pack(side = 'left')
while remain != 0 :
# Add new line
frame = Frame(self, relief = "solid", bd=1)
frame.pack(fill = "both")
if remain + 1 > WIDTH_SIZE :
size = WIDTH_SIZE - 1
else :
size = remain
len = remain + 1
if lines[1] == '0': # alloc
alloc_button1 = Button(frame, bg = "blue", padx = 1, bd = 0, width = size, command = curry(self.alloc_event, lines[0], lines[2], lines[3], lines[4]))
alloc_button1.pack(side = 'left')
elif lines[1] == '1': # stack
stack_button1 = Button(frame, bg = "red", padx = 1, bd = 0, width = size, command = curry(self.alloc_event, lines[0], lines[2], lines[3], lines[4]))
stack_button1.pack(side = 'left')
else : # free
free_button1 = Button(frame, bg = "white" , padx = 1, bd = 0, width = size, command = curry(self.free_event, lines[0], lines[2]))
free_button1.pack(side = 'left')
remain -= size
if len == WIDTH_SIZE :
# Add new line
frame = Frame(self, relief = "solid", bd = 1)
frame.pack(fill = "both")
len = 0
def alloc_event(self, Size, MemAddr, Pid, Owner):
self.heap_label['text'] = 'MemAddr : ' + MemAddr + '\n' + 'Size : ' + Size + '\n' + 'Pid : ' + Pid + '\n' + 'Owner : ' + Owner
def free_event(self, Size, MemAddr):
self.heap_label['text'] = 'MemAddr : ' + MemAddr + '\n' + 'Size : ' + Size
class DumpParser(Tk):
def __init__(self):
Tk.__init__(self)
self.modevar = IntVar()
self.modevar.set(1)
self.log = StringVar()
self.initialize()
def initialize(self):
self.elfpath = PathFrame(self, "ELF path ", g_elfpath)
self.elfpath.pack(anchor=W)
self.modeframe = Frame(self)
self.modeframe.pack(anchor=W)
for mode, val in modes:
Radiobutton(self.modeframe, text=mode, variable=self.modevar, value=val, command=self.OnRadioClick).pack(anchor=W)
self.dataframe = Frame(self)
self.dataframe.pack(anchor=W)
self.logtext = Text(self.dataframe)
self.logtext.pack(anchor=W)
self.logpath = PathFrame(self.dataframe, "AssertLogFile path")
self.ramdumppath = PathFrame(self.dataframe, "Ramdump path ")
btn = Button(self, text="Run DumpParser", command=self.RunDumpParser)
btn.pack(anchor=W)
def OnRadioClick(self):
self.logtext.pack_forget()
self.logpath.pack_forget()
self.ramdumppath.pack_forget()
if self.modevar.get() == 1:
self.logtext.pack(anchor=W)
elif self.modevar.get() == 2:
self.logpath.pack(anchor=W)
elif self.modevar.get() == 3:
self.ramdumppath.pack(anchor=W)
elif self.modevar.get() == 4:
self.logtext.pack(anchor=W)
def RunDumpParser(self):
resWin = Toplevel(self)
resWin.wm_title("Dump Information")
resText = Text(resWin)
resText.pack(fill = "both")
if self.modevar.get() == 1:
fd, path = tempfile.mkstemp()
try:
with os.fdopen(fd, 'w') as tmp:
tmp.write(self.logtext.get("1.0",END))
with os.popen("python ramdumpParser.py" +
" -e " + self.elfpath.path.get() +
" -t " + path) as fd:
output = fd.read()
resText.insert(INSERT, output)
finally:
os.remove(path)
elif self.modevar.get() == 2:
with os.popen("python ramdumpParser.py"
" -e " + self.elfpath.path.get()+
" -t " + self.logpath.path.get()) as fd:
output = fd.read()
resText.insert(INSERT, output)
elif self.modevar.get() == 3:
with os.popen("python ramdumpParser.py"
" -e " + self.elfpath.path.get()+
" -r " + self.ramdumppath.path.get()) as fd:
output = fd.read()
resText.insert(INSERT, output)
if os.path.isfile(file_data):
HeapInfo()
os.remove(file_data)
elif self.modevar.get() == 4:
text = self.logtext.get("1.0",END)
lines = [_f for _f in text.split("\n") if _f]
for line in lines:
addr_start = line.find("[<")
addr_end = line.find(">]")
addr = line[addr_start+2:addr_end]
cmd = ['addr2line', '-e', self.elfpath.path.get(), addr]
fd_popen = subprocess.Popen(cmd, stdout=subprocess.PIPE).stdout
data = fd_popen.read()
resText.insert(INSERT, data)
if __name__ == "__main__":
os.chdir(os.path.dirname(os.path.realpath(__file__)))
app = DumpParser()
app.title("Ramdump Parser")
app.mainloop()
|
an4967/TizenRT
|
tools/dump_tool/gui_ramdumpParser.py
|
Python
|
apache-2.0
| 8,549
|
# coding:utf-8
"""
TokenStream represents a stream of tokens that a parser will consume.
TokenStream can be used to consume tokens, peek ahead, and synchronize to a
delimiter token. The tokens that the token stream operates on are either
compiled regular expressions or strings.
"""
import re
import six
class TokenStream(object):
"""
Represents the stream of tokens that the parser will consume. The token
    stream can be used to consume tokens, peek ahead, and synchronize to a
delimiter token.
    When the stream reaches its end, the position is placed
at one plus the position of the last token.
"""
def __init__(self, stream):
self.position = 0
self.stream = stream
def get_token(self, token, ngroup=None):
"""
Get the next token from the stream and advance the stream. Token can
be either a compiled regex or a string.
"""
# match single character
if isinstance(token, six.string_types) and len(token) == 1:
if self.peek() == token:
self.position += 1
return token
return None
# match a pattern
match = token.match(self.stream, self.position)
if match:
advance = match.end() - match.start()
self.position += advance
            # if we are asking for a named capture, return just that
if ngroup:
return match.group(ngroup)
# otherwise return the entire capture
return match.group()
return None
def end_of_stream(self):
"""
Check if the end of the stream has been reached, if it has, returns
True, otherwise false.
"""
if self.position >= len(self.stream):
return True
return False
def peek(self, token=None):
"""
Peek at the stream to see what the next token is or peek for a
specific token.
"""
        # peek at what's next in the stream
if token is None:
if self.position < len(self.stream):
return self.stream[self.position]
else:
return None
# peek for a specific token
else:
match = token.match(self.stream, self.position)
if match:
return self.stream[match.start():match.end()]
return None
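

if __name__ == '__main__':  # pragma: no cover
    # Tiny illustrative sketch of the API above (hypothetical input; tokens
    # may be single-character strings or compiled regular expressions).
    atom = re.compile(r'[A-Za-z0-9]+')
    stream = TokenStream('user@example')
    assert stream.get_token(atom) == 'user'     # consume a regex token
    assert stream.peek() == '@'                 # look ahead without consuming
    assert stream.get_token('@') == '@'         # consume a one-char string token
    assert stream.get_token(atom) == 'example'
    assert stream.end_of_stream()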
|
mailgun/flanker
|
flanker/addresslib/plugins/_tokenizer.py
|
Python
|
apache-2.0
| 2,400
|
import json
from collections import OrderedDict
from datetime import date, datetime, timedelta
from django.conf import settings
from django.core.cache import cache
from django.db import models
from django.db.models import Q, Sum
from django.template import loader
from django.utils.encoding import python_2_unicode_compatible
from django.utils.translation import ugettext, ugettext_lazy as _
import six
import olympia.core.logger
from olympia import amo
from olympia.abuse.models import AbuseReport
from olympia.access import acl
from olympia.addons.models import Addon, Persona
from olympia.amo.fields import PositiveAutoField
from olympia.amo.models import ManagerBase, ModelBase
from olympia.amo.templatetags.jinja_helpers import absolutify
from olympia.amo.urlresolvers import reverse
from olympia.amo.utils import cache_ns_key, send_mail
from olympia.files.models import FileValidation
from olympia.ratings.models import Rating
from olympia.reviewers.sql_model import RawSQLModel
from olympia.users.models import UserForeignKey, UserProfile
from olympia.versions.models import Version, version_uploaded
user_log = olympia.core.logger.getLogger('z.users')
log = olympia.core.logger.getLogger('z.reviewers')
VIEW_QUEUE_FLAGS = (
('needs_admin_code_review', 'needs-admin-code-review',
_('Needs Admin Code Review')),
('needs_admin_content_review', 'needs-admin-content-review',
_('Needs Admin Content Review')),
('needs_admin_theme_review', 'needs-admin-theme-review',
_('Needs Admin Static Theme Review')),
('is_jetpack', 'jetpack', _('Jetpack Add-on')),
('is_restart_required', 'is_restart_required', _('Requires Restart')),
('pending_info_request', 'info', _('More Information Requested')),
('expired_info_request', 'expired-info', _('Expired Information Request')),
('sources_provided', 'sources-provided', _('Sources provided')),
('is_webextension', 'webextension', _('WebExtension')),
)
def get_reviewing_cache_key(addon_id):
return 'review_viewing:{id}'.format(id=addon_id)
def clear_reviewing_cache(addon_id):
return cache.delete(get_reviewing_cache_key(addon_id))
def get_reviewing_cache(addon_id):
return cache.get(get_reviewing_cache_key(addon_id))
def set_reviewing_cache(addon_id, user_id):
# We want to save it for twice as long as the ping interval,
# just to account for latency and the like.
cache.set(get_reviewing_cache_key(addon_id),
user_id,
amo.REVIEWER_VIEWING_INTERVAL * 2)
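
# Illustrative sketch of the reviewer-lock pattern the helpers above support
# (hypothetical ids; real callers pass the addon's pk and the reviewer's pk):
#
#     set_reviewing_cache(42, 7)             # reviewer 7 now holds addon 42
#     assert get_reviewing_cache(42) == 7
#     clear_reviewing_cache(42)              # release the lock
#     assert get_reviewing_cache(42) is None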
@python_2_unicode_compatible
class CannedResponse(ModelBase):
id = PositiveAutoField(primary_key=True)
name = models.CharField(max_length=255)
response = models.TextField()
sort_group = models.CharField(max_length=255)
type = models.PositiveIntegerField(
choices=amo.CANNED_RESPONSE_CHOICES.items(), db_index=True, default=0)
class Meta:
db_table = 'cannedresponses'
def __str__(self):
return six.text_type(self.name)
def get_flags(addon, version):
"""Return a list of tuples (indicating which flags should be displayed for
a particular add-on."""
return [(cls, title) for (prop, cls, title) in VIEW_QUEUE_FLAGS
if getattr(version, prop, getattr(addon, prop, None))]
def get_flags_for_row(record):
"""Like get_flags(), but for the queue pages, using fields directly
returned by the queues SQL query."""
return [(cls, title) for (prop, cls, title) in VIEW_QUEUE_FLAGS
if getattr(record, prop)]
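
# Illustrative shape of the data returned by get_flags()/get_flags_for_row()
# (hypothetical values; titles are the translated strings declared in
# VIEW_QUEUE_FLAGS above):
#
#     [('jetpack', 'Jetpack Add-on'), ('webextension', 'WebExtension')]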
class ViewQueue(RawSQLModel):
id = models.IntegerField()
addon_name = models.CharField(max_length=255)
addon_slug = models.CharField(max_length=30)
addon_status = models.IntegerField()
addon_type_id = models.IntegerField()
needs_admin_code_review = models.NullBooleanField()
needs_admin_content_review = models.NullBooleanField()
needs_admin_theme_review = models.NullBooleanField()
is_restart_required = models.BooleanField()
is_jetpack = models.BooleanField()
source = models.CharField(max_length=100)
is_webextension = models.BooleanField()
latest_version = models.CharField(max_length=255)
pending_info_request = models.DateTimeField()
expired_info_request = models.NullBooleanField()
waiting_time_days = models.IntegerField()
waiting_time_hours = models.IntegerField()
waiting_time_min = models.IntegerField()
def base_query(self):
return {
'select': OrderedDict([
('id', 'addons.id'),
('addon_name', 'tr.localized_string'),
('addon_status', 'addons.status'),
('addon_type_id', 'addons.addontype_id'),
('addon_slug', 'addons.slug'),
('needs_admin_code_review',
'addons_addonreviewerflags.needs_admin_code_review'),
('needs_admin_content_review',
'addons_addonreviewerflags.needs_admin_content_review'),
('needs_admin_theme_review',
'addons_addonreviewerflags.needs_admin_theme_review'),
('latest_version', 'versions.version'),
('pending_info_request',
'addons_addonreviewerflags.pending_info_request'),
('expired_info_request', (
'TIMEDIFF(addons_addonreviewerflags.pending_info_request,'
'NOW()) < 0')),
('is_jetpack', 'MAX(files.jetpack_version IS NOT NULL)'),
('is_restart_required', 'MAX(files.is_restart_required)'),
('source', 'versions.source'),
('is_webextension', 'MAX(files.is_webextension)'),
('waiting_time_days',
'TIMESTAMPDIFF(DAY, MAX(versions.nomination), NOW())'),
('waiting_time_hours',
'TIMESTAMPDIFF(HOUR, MAX(versions.nomination), NOW())'),
('waiting_time_min',
'TIMESTAMPDIFF(MINUTE, MAX(versions.nomination), NOW())'),
]),
'from': [
'addons',
"""
LEFT JOIN addons_addonreviewerflags ON (
addons.id = addons_addonreviewerflags.addon_id)
LEFT JOIN versions ON (addons.id = versions.addon_id)
LEFT JOIN files ON (files.version_id = versions.id)
JOIN translations AS tr ON (
tr.id = addons.name
AND tr.locale = addons.defaultlocale)
"""
],
'where': [
'NOT addons.inactive', # disabled_by_user
'versions.channel = %s' % amo.RELEASE_CHANNEL_LISTED,
'files.status = %s' % amo.STATUS_AWAITING_REVIEW,
],
'group_by': 'id'}
@property
def sources_provided(self):
return bool(self.source)
@property
def flags(self):
return get_flags_for_row(self)
class ViewFullReviewQueue(ViewQueue):
def base_query(self):
q = super(ViewFullReviewQueue, self).base_query()
q['where'].append('addons.status = %s' % amo.STATUS_NOMINATED)
return q
class ViewPendingQueue(ViewQueue):
def base_query(self):
q = super(ViewPendingQueue, self).base_query()
q['where'].append('addons.status = %s' % amo.STATUS_PUBLIC)
return q
class ViewUnlistedAllList(RawSQLModel):
id = models.IntegerField()
addon_name = models.CharField(max_length=255)
addon_slug = models.CharField(max_length=30)
guid = models.CharField(max_length=255)
version_date = models.DateTimeField()
_author_ids = models.CharField(max_length=255)
_author_usernames = models.CharField()
review_date = models.DateField()
review_version_num = models.CharField(max_length=255)
review_log_id = models.IntegerField()
addon_status = models.IntegerField()
latest_version = models.CharField(max_length=255)
needs_admin_code_review = models.NullBooleanField()
needs_admin_content_review = models.NullBooleanField()
needs_admin_theme_review = models.NullBooleanField()
is_deleted = models.BooleanField()
def base_query(self):
review_ids = ','.join([str(r) for r in amo.LOG_REVIEWER_REVIEW_ACTION])
return {
'select': OrderedDict([
('id', 'addons.id'),
('addon_name', 'tr.localized_string'),
('addon_status', 'addons.status'),
('addon_slug', 'addons.slug'),
('latest_version', 'versions.version'),
('guid', 'addons.guid'),
('_author_ids', 'GROUP_CONCAT(authors.user_id)'),
('_author_usernames', 'GROUP_CONCAT(users.username)'),
('needs_admin_code_review',
'addons_addonreviewerflags.needs_admin_code_review'),
('needs_admin_content_review',
'addons_addonreviewerflags.needs_admin_content_review'),
('needs_admin_theme_review',
'addons_addonreviewerflags.needs_admin_theme_review'),
('is_deleted', 'IF (addons.status=11, true, false)'),
('version_date', 'versions.nomination'),
('review_date', 'reviewed_versions.created'),
('review_version_num', 'reviewed_versions.version'),
('review_log_id', 'reviewed_versions.log_id'),
]),
'from': [
'addons',
"""
JOIN (
SELECT MAX(id) AS latest_version, addon_id FROM versions
WHERE channel = {channel}
GROUP BY addon_id
) AS latest_version
ON latest_version.addon_id = addons.id
LEFT JOIN addons_addonreviewerflags ON (
addons.id = addons_addonreviewerflags.addon_id)
LEFT JOIN versions
ON (latest_version.latest_version = versions.id)
JOIN translations AS tr ON (
tr.id = addons.name AND
tr.locale = addons.defaultlocale)
LEFT JOIN addons_users AS authors
ON addons.id = authors.addon_id
LEFT JOIN users as users ON users.id = authors.user_id
LEFT JOIN (
SELECT versions.id AS id, addon_id, log.created, version,
log.id AS log_id
FROM versions
JOIN log_activity_version AS log_v ON (
log_v.version_id=versions.id)
JOIN log_activity as log ON (
log.id=log_v.activity_log_id)
WHERE log.user_id <> {task_user} AND
log.action in ({review_actions}) AND
versions.channel = {channel}
ORDER BY id desc
) AS reviewed_versions
ON reviewed_versions.addon_id = addons.id
""".format(task_user=settings.TASK_USER_ID,
review_actions=review_ids,
channel=amo.RELEASE_CHANNEL_UNLISTED),
],
'where': [
'NOT addons.inactive', # disabled_by_user
'versions.channel = %s' % amo.RELEASE_CHANNEL_UNLISTED,
"""((reviewed_versions.id = (select max(reviewed_versions.id)))
OR
(reviewed_versions.id IS NULL))
""",
'addons.status <> %s' % amo.STATUS_DISABLED
],
'group_by': 'id'}
@property
def authors(self):
ids = self._explode_concat(self._author_ids)
usernames = self._explode_concat(
self._author_usernames, cast=six.text_type)
return list(set(zip(ids, usernames)))
class PerformanceGraph(RawSQLModel):
id = models.IntegerField()
yearmonth = models.CharField(max_length=7)
approval_created = models.DateTimeField()
user_id = models.IntegerField()
total = models.IntegerField()
def base_query(self):
request_ver = amo.LOG.REQUEST_VERSION.id
review_ids = [str(r) for r in amo.LOG_REVIEWER_REVIEW_ACTION
if r != request_ver]
return {
'select': OrderedDict([
('yearmonth',
"DATE_FORMAT(`log_activity`.`created`, '%%Y-%%m')"),
('approval_created', '`log_activity`.`created`'),
('user_id', '`log_activity`.`user_id`'),
('total', 'COUNT(*)')
]),
'from': [
'log_activity',
],
'where': [
'log_activity.action in (%s)' % ','.join(review_ids),
'user_id <> %s' % settings.TASK_USER_ID # No auto-approvals.
],
'group_by': 'yearmonth, user_id'
}
class ReviewerSubscription(ModelBase):
user = models.ForeignKey(UserProfile)
addon = models.ForeignKey(Addon)
class Meta:
db_table = 'editor_subscriptions'
def send_notification(self, version):
user_log.info('Sending addon update notice to %s for %s' %
(self.user.email, self.addon.pk))
context = {
'name': self.addon.name,
'url': absolutify(reverse('addons.detail', args=[self.addon.pk],
add_prefix=False)),
'number': version.version,
'review': absolutify(reverse('reviewers.review',
args=[self.addon.pk],
add_prefix=False)),
'SITE_URL': settings.SITE_URL,
}
# Not being localised because we don't know the reviewer's locale.
subject = 'Mozilla Add-ons: %s Updated' % self.addon.name
template = loader.get_template('reviewers/emails/notify_update.ltxt')
send_mail(subject, template.render(context),
recipient_list=[self.user.email],
from_email=settings.ADDONS_EMAIL,
use_deny_list=False)
def send_notifications(signal=None, sender=None, **kw):
if sender.channel != amo.RELEASE_CHANNEL_LISTED:
return
subscribers = sender.addon.reviewersubscription_set.all()
if not subscribers:
return
for subscriber in subscribers:
user = subscriber.user
is_reviewer = (
user and not user.deleted and user.email and
acl.is_user_any_kind_of_reviewer(user))
if is_reviewer:
subscriber.send_notification(sender)
version_uploaded.connect(send_notifications, dispatch_uid='send_notifications')
class ReviewerScore(ModelBase):
id = PositiveAutoField(primary_key=True)
user = models.ForeignKey(UserProfile, related_name='_reviewer_scores')
addon = models.ForeignKey(Addon, blank=True, null=True, related_name='+')
version = models.ForeignKey(Version, blank=True, null=True,
related_name='+')
score = models.IntegerField()
# For automated point rewards.
note_key = models.SmallIntegerField(choices=amo.REVIEWED_CHOICES.items(),
default=0)
# For manual point rewards with a note.
note = models.CharField(max_length=255)
class Meta:
db_table = 'reviewer_scores'
ordering = ('-created',)
@classmethod
def get_key(cls, key=None, invalidate=False):
namespace = 'riscore'
if not key: # Assuming we're invalidating the namespace.
cache_ns_key(namespace, invalidate)
return
else:
# Using cache_ns_key so each cache val is invalidated together.
ns_key = cache_ns_key(namespace, invalidate)
return '%s:%s' % (ns_key, key)
@classmethod
def get_event(cls, addon, status, version=None, post_review=False,
content_review=False):
"""Return the review event type constant.
This is determined by the addon.type and the queue the addon is
currently in (which is determined from the various parameters sent
down from award_points()).
Note: We're not using addon.status or addon.current_version because
this is called after the status/current_version might have been updated
by the reviewer action.
"""
reviewed_score_name = None
if content_review:
# Content review always gives the same amount of points.
reviewed_score_name = 'REVIEWED_CONTENT_REVIEW'
elif post_review:
# There are 4 tiers of post-review scores depending on the addon
# weight.
try:
if version is None:
raise AutoApprovalSummary.DoesNotExist
weight = version.autoapprovalsummary.weight
except AutoApprovalSummary.DoesNotExist as exception:
log.exception(
'No such version/auto approval summary when determining '
'event type to award points: %r', exception)
weight = 0
if addon.type == amo.ADDON_DICT:
reviewed_score_name = 'REVIEWED_DICT_FULL'
elif addon.type in [amo.ADDON_LPAPP, amo.ADDON_LPADDON]:
reviewed_score_name = 'REVIEWED_LP_FULL'
elif addon.type == amo.ADDON_SEARCH:
reviewed_score_name = 'REVIEWED_SEARCH_FULL'
elif weight > amo.POST_REVIEW_WEIGHT_HIGHEST_RISK:
reviewed_score_name = 'REVIEWED_EXTENSION_HIGHEST_RISK'
elif weight > amo.POST_REVIEW_WEIGHT_HIGH_RISK:
reviewed_score_name = 'REVIEWED_EXTENSION_HIGH_RISK'
elif weight > amo.POST_REVIEW_WEIGHT_MEDIUM_RISK:
reviewed_score_name = 'REVIEWED_EXTENSION_MEDIUM_RISK'
else:
reviewed_score_name = 'REVIEWED_EXTENSION_LOW_RISK'
else:
if status == amo.STATUS_NOMINATED:
queue = 'FULL'
elif status == amo.STATUS_PUBLIC:
queue = 'UPDATE'
else:
queue = ''
if (addon.type in [amo.ADDON_EXTENSION, amo.ADDON_PLUGIN,
amo.ADDON_API] and queue):
reviewed_score_name = 'REVIEWED_ADDON_%s' % queue
elif addon.type == amo.ADDON_DICT and queue:
reviewed_score_name = 'REVIEWED_DICT_%s' % queue
elif addon.type in [amo.ADDON_LPAPP, amo.ADDON_LPADDON] and queue:
reviewed_score_name = 'REVIEWED_LP_%s' % queue
elif addon.type == amo.ADDON_PERSONA:
reviewed_score_name = 'REVIEWED_PERSONA'
elif addon.type == amo.ADDON_STATICTHEME:
reviewed_score_name = 'REVIEWED_STATICTHEME'
elif addon.type == amo.ADDON_SEARCH and queue:
reviewed_score_name = 'REVIEWED_SEARCH_%s' % queue
elif addon.type == amo.ADDON_THEME and queue:
reviewed_score_name = 'REVIEWED_XUL_THEME_%s' % queue
if reviewed_score_name:
return getattr(amo, reviewed_score_name)
return None
@classmethod
def award_points(cls, user, addon, status, version=None,
post_review=False, content_review=False,
extra_note=''):
"""Awards points to user based on an event and the queue.
`event` is one of the `REVIEWED_` keys in constants.
`status` is one of the `STATUS_` keys in constants.
`version` is the `Version` object that was affected by the review.
`post_review` is set to True if the add-on was auto-approved and the
reviewer is confirming/rejecting post-approval.
`content_review` is set to True if it's a content-only review of an
auto-approved add-on.
"""
# If a webextension file gets approved manually (e.g. because
# auto-approval is disabled), 'post-review' is set to False, treating
# the file as a legacy file which is not what we want. The file is
        # still a webextension and should be treated as such, regardless of
# auto-approval being disabled or not.
# As a hack, we set 'post_review' to True.
if (version and
version.is_webextension and
addon.type in amo.GROUP_TYPE_ADDON):
post_review = True
user_log.info(
(u'Determining award points for user %s for version %s of addon %s'
% (user, version, addon.id)).encode('utf-8'))
event = cls.get_event(
addon, status, version=version, post_review=post_review,
content_review=content_review)
score = amo.REVIEWED_SCORES.get(event)
user_log.info(
(u'Determined %s award points (event: %s) for user %s for version '
u'%s of addon %s' % (score, event, user, version, addon.id))
.encode('utf-8'))
# Add bonus to reviews greater than our limit to encourage fixing
# old reviews. Does not apply to content-review/post-review at the
# moment, because it would need to be calculated differently.
award_overdue_bonus = (
version and version.nomination and
not post_review and not content_review)
if award_overdue_bonus:
waiting_time_days = (datetime.now() - version.nomination).days
days_over = waiting_time_days - amo.REVIEWED_OVERDUE_LIMIT
if days_over > 0:
bonus = days_over * amo.REVIEWED_OVERDUE_BONUS
score = score + bonus
if score is not None:
cls.objects.create(user=user, addon=addon, score=score,
note_key=event, note=extra_note,
version=version)
cls.get_key(invalidate=True)
user_log.info(
(u'Awarding %s points to user %s for "%s" for addon %s' % (
score, user, amo.REVIEWED_CHOICES[event], addon.id))
.encode('utf-8'))
return score
@classmethod
def award_moderation_points(cls, user, addon, review_id, undo=False):
"""Awards points to user based on moderated review."""
event = (amo.REVIEWED_ADDON_REVIEW if not undo else
amo.REVIEWED_ADDON_REVIEW_POORLY)
score = amo.REVIEWED_SCORES.get(event)
cls.objects.create(user=user, addon=addon, score=score, note_key=event)
cls.get_key(invalidate=True)
user_log.info(
u'Awarding %s points to user %s for "%s" for review %s' % (
score, user, amo.REVIEWED_CHOICES[event], review_id))
@classmethod
def get_total(cls, user):
"""Returns total points by user."""
key = cls.get_key('get_total:%s' % user.id)
val = cache.get(key)
if val is not None:
return val
val = list(ReviewerScore.objects.filter(user=user)
.aggregate(total=Sum('score'))
.values())[0]
if val is None:
val = 0
cache.set(key, val, None)
return val
@classmethod
def get_recent(cls, user, limit=5, addon_type=None):
"""Returns most recent ReviewerScore records."""
key = cls.get_key('get_recent:%s' % user.id)
val = cache.get(key)
if val is not None:
return val
val = ReviewerScore.objects.filter(user=user)
if addon_type is not None:
            val = val.filter(addon__type=addon_type)
val = list(val[:limit])
cache.set(key, val, None)
return val
@classmethod
def get_breakdown(cls, user):
"""Returns points broken down by addon type."""
key = cls.get_key('get_breakdown:%s' % user.id)
val = cache.get(key)
if val is not None:
return val
sql = """
SELECT `reviewer_scores`.*,
SUM(`reviewer_scores`.`score`) AS `total`,
`addons`.`addontype_id` AS `atype`
FROM `reviewer_scores`
LEFT JOIN `addons` ON (`reviewer_scores`.`addon_id`=`addons`.`id`)
WHERE `reviewer_scores`.`user_id` = %s
GROUP BY `addons`.`addontype_id`
ORDER BY `total` DESC
"""
val = list(ReviewerScore.objects.raw(sql, [user.id]))
cache.set(key, val, None)
return val
@classmethod
def get_breakdown_since(cls, user, since):
"""
Returns points broken down by addon type since the given datetime.
"""
key = cls.get_key('get_breakdown:%s:%s' % (user.id, since.isoformat()))
val = cache.get(key)
if val is not None:
return val
sql = """
SELECT `reviewer_scores`.*,
SUM(`reviewer_scores`.`score`) AS `total`,
`addons`.`addontype_id` AS `atype`
FROM `reviewer_scores`
LEFT JOIN `addons` ON (`reviewer_scores`.`addon_id`=`addons`.`id`)
WHERE `reviewer_scores`.`user_id` = %s AND
`reviewer_scores`.`created` >= %s
GROUP BY `addons`.`addontype_id`
ORDER BY `total` DESC
"""
val = list(ReviewerScore.objects.raw(sql, [user.id, since]))
cache.set(key, val, 3600)
return val
@classmethod
def _leaderboard_list(cls, since=None, types=None, addon_type=None):
"""
Returns base leaderboard list. Each item will be a tuple containing
(user_id, name, total).
"""
reviewers = (UserProfile.objects
.filter(groups__name__startswith='Reviewers: ')
.exclude(groups__name__in=('Staff', 'Admins',
'No Reviewer Incentives'))
.distinct())
qs = (cls.objects
.values_list('user__id')
.filter(user__in=reviewers)
.annotate(total=Sum('score'))
.order_by('-total'))
if since is not None:
qs = qs.filter(created__gte=since)
if types is not None:
qs = qs.filter(note_key__in=types)
if addon_type is not None:
qs = qs.filter(addon__type=addon_type)
users = {reviewer.pk: reviewer for reviewer in reviewers}
return [
(item[0], users.get(item[0], UserProfile()).name, item[1])
for item in qs]
@classmethod
def get_leaderboards(cls, user, days=7, types=None, addon_type=None):
"""Returns leaderboards with ranking for the past given days.
This will return a dict of 3 items::
{'leader_top': [...],
             'leader_near': [...],
'user_rank': (int)}
If the user is not in the leaderboard, or if the user is in the top 5,
'leader_near' will be an empty list and 'leader_top' will contain 5
elements instead of the normal 3.
"""
key = cls.get_key('get_leaderboards:%s' % user.id)
val = cache.get(key)
if val is not None:
return val
week_ago = date.today() - timedelta(days=days)
leader_top = []
leader_near = []
leaderboard = cls._leaderboard_list(
since=week_ago, types=types, addon_type=addon_type)
scores = []
user_rank = 0
in_leaderboard = False
for rank, row in enumerate(leaderboard, 1):
user_id, name, total = row
scores.append({
'user_id': user_id,
'name': name,
'rank': rank,
'total': int(total),
})
if user_id == user.id:
user_rank = rank
in_leaderboard = True
if not in_leaderboard:
leader_top = scores[:5]
else:
if user_rank <= 5: # User is in top 5, show top 5.
leader_top = scores[:5]
else:
leader_top = scores[:3]
leader_near = [scores[user_rank - 2], scores[user_rank - 1]]
try:
leader_near.append(scores[user_rank])
except IndexError:
pass # User is last on the leaderboard.
val = {
'leader_top': leader_top,
'leader_near': leader_near,
'user_rank': user_rank,
}
cache.set(key, val, None)
return val
@classmethod
def all_users_by_score(cls):
"""
Returns reviewers ordered by highest total points first.
"""
leaderboard = cls._leaderboard_list()
scores = []
for row in leaderboard:
user_id, name, total = row
user_level = len(amo.REVIEWED_LEVELS) - 1
for i, level in enumerate(amo.REVIEWED_LEVELS):
if total < level['points']:
user_level = i - 1
break
# Only show level if it changes.
if user_level < 0:
level = ''
else:
level = six.text_type(amo.REVIEWED_LEVELS[user_level]['name'])
scores.append({
'user_id': user_id,
'name': name,
'total': int(total),
'level': level,
})
prev = None
for score in reversed(scores):
if score['level'] == prev:
score['level'] = ''
else:
prev = score['level']
return scores
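
# Illustrative shape of ReviewerScore.all_users_by_score() output (hypothetical
# values; a row's 'level' is blanked when it repeats the level of the row
# below it):
#
#     [{'user_id': 7, 'name': 'Jane', 'total': 1250, 'level': ''},
#      {'user_id': 3, 'name': 'Ahmed', 'total': 980, 'level': 'Level 2'}]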
class AutoApprovalNotEnoughFilesError(Exception):
pass
class AutoApprovalNoValidationResultError(Exception):
pass
@python_2_unicode_compatible
class AutoApprovalSummary(ModelBase):
"""Model holding the results of an auto-approval attempt on a Version."""
version = models.OneToOneField(
Version, on_delete=models.CASCADE, primary_key=True)
is_locked = models.BooleanField(default=False)
has_auto_approval_disabled = models.BooleanField(default=False)
verdict = models.PositiveSmallIntegerField(
choices=amo.AUTO_APPROVAL_VERDICT_CHOICES,
default=amo.NOT_AUTO_APPROVED)
weight = models.IntegerField(default=0)
confirmed = models.NullBooleanField(default=None)
class Meta:
db_table = 'editors_autoapprovalsummary'
def __str__(self):
return u'%s %s' % (self.version.addon.name, self.version)
def calculate_weight(self):
"""Calculate the weight value for this version according to various
risk factors, setting the weight property on the instance and returning
a dict of risk factors.
That value is then used in reviewer tools to prioritize add-ons in the
auto-approved queue."""
# Note: for the moment, some factors are in direct contradiction with
# the rules determining whether or not an add-on can be auto-approved
# in the first place, but we'll relax those rules as we move towards
# post-review.
addon = self.version.addon
one_year_ago = (self.created or datetime.now()) - timedelta(days=365)
six_weeks_ago = (self.created or datetime.now()) - timedelta(days=42)
factors = {
# Add-ons under admin code review: 100 added to weight.
'admin_code_review': 100 if addon.needs_admin_code_review else 0,
            # Each abuse report for the add-on or one of the listed developers
# in the last 6 weeks adds 15 to the weight, up to a maximum of
# 100.
'abuse_reports': min(
AbuseReport.objects
.filter(Q(addon=addon) | Q(user__in=addon.listed_authors))
.filter(created__gte=six_weeks_ago).count() * 15, 100),
# 1% of the total of "recent" ratings with a score of 3 or less
# adds 2 to the weight, up to a maximum of 100.
'negative_ratings': min(int(
Rating.objects
.filter(addon=addon)
.filter(rating__lte=3, created__gte=one_year_ago)
.count() / 100.0 * 2.0), 100),
# Reputation is set by admin - the value is inverted to add from
# -300 (decreasing priority for "trusted" add-ons) to 0.
'reputation': (
max(min(int(addon.reputation or 0) * -100, 0), -300)),
# Average daily users: value divided by 10000 is added to the
# weight, up to a maximum of 100.
'average_daily_users': min(
addon.average_daily_users // 10000, 100),
            # Past rejection history: each "recent" rejected version (disabled
# with an original status of null, so not disabled by a developer)
# adds 10 to the weight, up to a maximum of 100.
'past_rejection_history': min(
Version.objects
.filter(addon=addon,
files__reviewed__gte=one_year_ago,
files__original_status=amo.STATUS_NULL,
files__status=amo.STATUS_DISABLED)
.distinct().count() * 10, 100),
}
factors.update(self.calculate_static_analysis_weight_factors())
self.weight = sum(factors.values())
return factors
def calculate_static_analysis_weight_factors(self):
"""Calculate the static analysis risk factors, returning a dict of
risk factors.
Used by calculate_weight()."""
try:
innerhtml_count = self.count_uses_innerhtml(self.version)
unknown_minified_code_count = (
self.count_uses_unknown_minified_code(self.version))
factors = {
# Static analysis flags from linter:
# eval() or document.write(): 50.
'uses_eval_or_document_write': (
50 if self.count_uses_eval_or_document_write(self.version)
else 0),
# Implied eval in setTimeout/setInterval/ on* attributes: 5.
'uses_implied_eval': (
5 if self.count_uses_implied_eval(self.version)
else 0),
# innerHTML / unsafe DOM: 50+10 per instance.
'uses_innerhtml': (
50 + 10 * (innerhtml_count - 1) if innerhtml_count else 0),
# custom CSP: 90.
'uses_custom_csp': (
90 if self.count_uses_custom_csp(self.version)
else 0),
# nativeMessaging permission: 100.
'uses_native_messaging': (
100 if self.check_uses_native_messaging(self.version)
else 0),
# remote scripts: 100.
'uses_remote_scripts': (
100 if self.count_uses_remote_scripts(self.version)
else 0),
# violates mozilla conditions of use: 20.
'violates_mozilla_conditions': (
20 if self.count_violates_mozilla_conditions(self.version)
else 0),
# libraries of unreadable code: 100+10 per instance.
'uses_unknown_minified_code': (
100 + 10 * (unknown_minified_code_count - 1)
if unknown_minified_code_count else 0),
# Size of code changes: 5kB is one point, up to a max of 100.
'size_of_code_changes': min(
self.calculate_size_of_code_changes() // 5000, 100),
# Seems to be using a coinminer: 2000
'uses_coinminer': (
2000 if self.count_uses_uses_coinminer(self.version)
else 0),
}
except AutoApprovalNoValidationResultError:
# We should have a FileValidationResult... since we don't and
# something is wrong, increase the weight by 500.
factors = {
'no_validation_result': 500,
}
return factors
def find_previous_confirmed_version(self):
"""Return the most recent version in the add-on history that has been
confirmed, excluding the one this summary is about, or None if there
isn't one."""
addon = self.version.addon
try:
version = addon.versions.exclude(pk=self.version.pk).filter(
autoapprovalsummary__confirmed=True).latest()
except Version.DoesNotExist:
version = None
return version
def calculate_size_of_code_changes(self):
"""Return the size of code changes between the version being
approved and the previous public one."""
def find_code_size(version):
# There could be multiple files: if that's the case, take the
# total for all files and divide it by the number of files.
number_of_files = len(version.all_files) or 1
total_code_size = 0
for file_ in version.all_files:
data = json.loads(file_.validation.validation)
total_code_size += (
data.get('metadata', {}).get('totalScannedFileSize', 0))
return total_code_size // number_of_files
try:
old_version = self.find_previous_confirmed_version()
old_size = find_code_size(old_version) if old_version else 0
new_size = find_code_size(self.version)
except FileValidation.DoesNotExist:
raise AutoApprovalNoValidationResultError()
# We don't really care about whether it's a negative or positive change
# in size, we just need the absolute value (if there is no current
# public version, that value ends up being the total code size of the
# version we're approving).
return abs(old_size - new_size)
def calculate_verdict(self, dry_run=False, pretty=False):
"""Calculate the verdict for this instance based on the values set
on it and the current configuration.
        Return a dict containing more information about which criteria passed
or not."""
if dry_run:
success_verdict = amo.WOULD_HAVE_BEEN_AUTO_APPROVED
failure_verdict = amo.WOULD_NOT_HAVE_BEEN_AUTO_APPROVED
else:
success_verdict = amo.AUTO_APPROVED
failure_verdict = amo.NOT_AUTO_APPROVED
# Currently the only thing that can prevent approval are a reviewer
# lock and having auto-approval disabled flag set on the add-on.
verdict_info = {
'is_locked': self.is_locked,
'has_auto_approval_disabled': self.has_auto_approval_disabled,
}
if any(verdict_info.values()):
self.verdict = failure_verdict
else:
self.verdict = success_verdict
if pretty:
verdict_info = self.verdict_info_prettifier(verdict_info)
return verdict_info
@classmethod
def verdict_info_prettifier(cls, verdict_info):
"""Return a generator of strings representing the a verdict_info
(as computed by calculate_verdict()) in human-readable form."""
mapping = {
'is_locked': ugettext('Is locked by a reviewer.'),
'has_auto_approval_disabled': ugettext(
'Has auto-approval disabled flag set.')
}
return (mapping[key] for key, value in sorted(verdict_info.items())
if value)
@classmethod
def _count_linter_flag(cls, version, flag):
def _count_linter_flag_in_file(file_):
try:
validation = file_.validation
except FileValidation.DoesNotExist:
raise AutoApprovalNoValidationResultError()
validation_data = json.loads(validation.validation)
return sum(flag in message['id']
for message in validation_data.get('messages', []))
return max(_count_linter_flag_in_file(file_)
for file_ in version.all_files)
@classmethod
def _count_metadata_property(cls, version, prop):
def _count_property_in_linter_metadata_in_file(file_):
try:
validation = file_.validation
except FileValidation.DoesNotExist:
raise AutoApprovalNoValidationResultError()
validation_data = json.loads(validation.validation)
return len(validation_data.get(
'metadata', {}).get(prop, []))
return max(_count_property_in_linter_metadata_in_file(file_)
for file_ in version.all_files)
@classmethod
def count_uses_unknown_minified_code(cls, version):
return cls._count_metadata_property(version, 'unknownMinifiedFiles')
@classmethod
def count_violates_mozilla_conditions(cls, version):
return cls._count_linter_flag(version, 'MOZILLA_COND_OF_USE')
@classmethod
def count_uses_remote_scripts(cls, version):
return cls._count_linter_flag(version, 'REMOTE_SCRIPT')
@classmethod
def count_uses_eval_or_document_write(cls, version):
return (
cls._count_linter_flag(version, 'NO_DOCUMENT_WRITE') or
cls._count_linter_flag(version, 'DANGEROUS_EVAL'))
@classmethod
def count_uses_implied_eval(cls, version):
return cls._count_linter_flag(version, 'NO_IMPLIED_EVAL')
@classmethod
def count_uses_innerhtml(cls, version):
return cls._count_linter_flag(version, 'UNSAFE_VAR_ASSIGNMENT')
@classmethod
def count_uses_custom_csp(cls, version):
return cls._count_linter_flag(version, 'MANIFEST_CSP')
@classmethod
def count_uses_uses_coinminer(cls, version):
return cls._count_linter_flag(version, 'COINMINER_USAGE_DETECTED')
@classmethod
def check_uses_native_messaging(cls, version):
return any('nativeMessaging' in file_.webext_permissions_list
for file_ in version.all_files)
@classmethod
def check_is_locked(cls, version):
locked = get_reviewing_cache(version.addon.pk)
return bool(locked) and locked != settings.TASK_USER_ID
@classmethod
def check_has_auto_approval_disabled(cls, version):
return bool(version.addon.auto_approval_disabled)
@classmethod
def create_summary_for_version(cls, version, dry_run=False):
"""Create a AutoApprovalSummary instance in db from the specified
version.
Return a tuple with the AutoApprovalSummary instance as first item,
and a dict containing information about the auto approval verdict as
second item.
        If the dry_run parameter is True, the instance is created/updated
        normally, but when storing the verdict the WOULD_ constants are used
        instead.
        If not using dry_run, it's the caller's responsibility to approve the
        version to make sure the AutoApprovalSummary is not overwritten later
        when the auto-approval process fires again."""
if len(version.all_files) == 0:
raise AutoApprovalNotEnoughFilesError()
data = {
'version': version,
'is_locked': cls.check_is_locked(version),
'has_auto_approval_disabled': cls.check_has_auto_approval_disabled(
version)
}
instance = cls(**data)
verdict_info = instance.calculate_verdict(dry_run=dry_run)
instance.calculate_weight()
# We can't do instance.save(), because we want to handle the case where
# it already existed. So we put the verdict and weight we just
# calculated in data and use update_or_create().
data['verdict'] = instance.verdict
data['weight'] = instance.weight
instance, _ = cls.objects.update_or_create(
version=version, defaults=data)
return instance, verdict_info
@classmethod
def get_auto_approved_queue(cls, admin_reviewer=False):
"""Return a queryset of Addon objects that have been auto-approved but
not confirmed by a human yet."""
success_verdict = amo.AUTO_APPROVED
qs = (
Addon.objects.public()
.filter(
_current_version__autoapprovalsummary__verdict=success_verdict)
.exclude(
_current_version__autoapprovalsummary__confirmed=True)
)
if not admin_reviewer:
qs = qs.exclude(addonreviewerflags__needs_admin_code_review=True)
return qs
@classmethod
def get_content_review_queue(cls, admin_reviewer=False):
"""Return a queryset of Addon objects that have been auto-approved and
need content review."""
success_verdict = amo.AUTO_APPROVED
qs = (
Addon.objects.public()
.filter(
_current_version__autoapprovalsummary__verdict=success_verdict,
addonapprovalscounter__last_content_review=None)
)
if not admin_reviewer:
qs = qs.exclude(
addonreviewerflags__needs_admin_content_review=True)
return qs
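
# Sketch of how a caller might drive the auto-approval API above (assumes a
# `version` with at least one file and a validation result; with dry_run=True
# the WOULD_* verdict constants are stored instead of the real ones):
#
#     summary, verdict_info = AutoApprovalSummary.create_summary_for_version(
#         version, dry_run=True)
#     for reason in AutoApprovalSummary.verdict_info_prettifier(verdict_info):
#         log.info('Would not auto-approve: %s', reason)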
class RereviewQueueThemeManager(ManagerBase):
def __init__(self, include_deleted=False):
# DO NOT change the default value of include_deleted unless you've read
# through the comment just above the Addon managers
# declaration/instantiation and understand the consequences.
ManagerBase.__init__(self)
self.include_deleted = include_deleted
def get_queryset(self):
qs = super(RereviewQueueThemeManager, self).get_queryset()
if self.include_deleted:
return qs
else:
return qs.exclude(theme__addon__status=amo.STATUS_DELETED)
@python_2_unicode_compatible
class RereviewQueueTheme(ModelBase):
id = PositiveAutoField(primary_key=True)
theme = models.ForeignKey(Persona)
header = models.CharField(max_length=72, blank=True, default='')
# Holds whether this reuploaded theme is a duplicate.
dupe_persona = models.ForeignKey(Persona, null=True,
related_name='dupepersona')
# The order of those managers is very important: please read the lengthy
# comment above the Addon managers declaration/instantiation.
unfiltered = RereviewQueueThemeManager(include_deleted=True)
objects = RereviewQueueThemeManager()
class Meta:
db_table = 'rereview_queue_theme'
# This is very important: please read the lengthy comment in Addon.Meta
# description
base_manager_name = 'unfiltered'
def __str__(self):
return str(self.id)
@property
def header_path(self):
"""Return the path to the header image."""
return self.theme._image_path(self.header or self.theme.header)
@property
def footer_path(self):
"""Return the path to the optional footer image."""
footer = self.footer or self.theme.footer
return footer and self.theme._image_path(footer) or ''
@property
def header_url(self):
"""Return the url of the header imager."""
return self.theme._image_url(self.header or self.theme.header)
@property
def footer_url(self):
"""Return the url of the optional footer image."""
footer = self.footer or self.theme.footer
return footer and self.theme._image_url(footer) or ''
class ThemeLock(ModelBase):
id = PositiveAutoField(primary_key=True)
theme = models.OneToOneField('addons.Persona')
reviewer = UserForeignKey()
expiry = models.DateTimeField()
class Meta:
db_table = 'theme_locks'
@python_2_unicode_compatible
class Whiteboard(ModelBase):
addon = models.OneToOneField(
Addon, on_delete=models.CASCADE, primary_key=True)
private = models.TextField(blank=True)
public = models.TextField(blank=True)
class Meta:
db_table = 'review_whiteboard'
def __str__(self):
return u'[%s] private: |%s| public: |%s|' % (
self.addon.name, self.private, self.public)
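
# Sketch: awarding reviewer points after a human review (assumes `user`,
# `addon` and `version` objects from the surrounding review handling; the
# status passed is the add-on's status at review time):
#
#     score = ReviewerScore.award_points(
#         user, addon, addon.status, version=version,
#         post_review=False, content_review=False)
#     total = ReviewerScore.get_total(user)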
|
wagnerand/olympia
|
src/olympia/reviewers/models.py
|
Python
|
bsd-3-clause
| 48,527
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('apimanager', '0011_auto_20151030_1604'),
]
operations = [
migrations.AlterField(
model_name='apipage',
name='api_path',
field=models.CharField(max_length=50, help_text='Actual public API root endpoint; example usage: api.hel.fi/{path}/', default='', verbose_name='Root path for API Management platform'),
),
]
|
City-of-Helsinki/devheldev
|
apimanager/migrations/0012_auto_20151103_1659.py
|
Python
|
agpl-3.0
| 554
|
import json
import logging
from django.core.serializers import serialize
from django.http import HttpResponse, JsonResponse, HttpResponseRedirect, HttpResponseBadRequest
from django.views.generic import TemplateView, FormView
from django.contrib.auth.mixins import LoginRequiredMixin
from django.contrib.auth import logout
from django.conf import settings
from django.shortcuts import redirect
from django.utils.safestring import mark_safe
from django.utils.decorators import method_decorator
from django_tables2 import SingleTableView
from braces.views import GroupRequiredMixin
from public.views import RegisterView, RegisterTokenView, ConsentTokenView
from mysite.common import Button, LinkButton
from members.models import VisitorFees
from events.views.public_event_views import EventRegisterView, EventSeatingView, EventGuestsView
from pos.views.mixins import IpadRequiredMixin, read_cookie
from pos.tables import *
from pos.forms import TerminalForm, VisitorForm
from pos.services import create_transaction_from_receipt, build_pos_array, PosServicesError
from wagtailcache.cache import nocache_page
LONG_TIMEOUT = 120000
SHORT_TIMEOUT = 30000
PING_TIMEOUT = 60000
stdlogger = logging.getLogger(__name__)
class SetTerminalView(LoginRequiredMixin, GroupRequiredMixin, FormView):
""" Define the terminal and application and set cookies in the device """
template_name = "pos/set_terminal.html"
form_class = TerminalForm
group_required = "Pos"
def get_initial(self):
initial = super().get_initial()
system, terminal = read_cookie(self.request)
initial.update({"system": system, "terminal": terminal})
return initial
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context["title"] = "Start POS on this device"
return context
def post(self, request, *args, **kwargs):
cookie = getattr(settings, "POS_COOKIE", None)
if "disable" in request.POST:
response = HttpResponseRedirect(reverse("pos_admin"))
response.delete_cookie(cookie)
return response
if "start" in request.POST:
terminal = request.POST["terminal"]
system = request.POST["system"]
response = HttpResponseRedirect(reverse("pos_start"))
max_age = 10 * 365 * 24 * 60 * 60
if cookie:
response.set_cookie(cookie, system + ";" + terminal, max_age=max_age)
response.set_cookie("terminal", terminal, max_age=max_age) # needed by pos
return response
return redirect("pos_admin")
@method_decorator(nocache_page, name="dispatch")
class DisabledView(TemplateView):
template_name = "pos/disabled.html"
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
system, terminal = read_cookie(self.request)
context["system"] = system
context["terminal"] = terminal
context["meta"] = self.request.META["HTTP_USER_AGENT"]
return context
@method_decorator(nocache_page, name="dispatch")
class StartView(IpadRequiredMixin, TemplateView):
""" Member login or attended mode selection """
template_name = "pos/start.html"
system = ""
person_id = None
def dispatch(self, request, *args, **kwargs):
logout(request)
self.person_id = kwargs.pop("person_id", None)
return super().dispatch(request, *args, **kwargs)
def get(self, request, *args, **kwargs):
request.session["person_id"] = self.person_id
request.session["app"] = None
request.session["attended"] = False
self.system, request.session["terminal"] = read_cookie(request)
if self.system:
return super().get(request, *args, **kwargs)
return redirect("pos_disabled")
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
apps = PosApp.objects.filter(enabled=True, layout__item_type=ItemType.BAR)
if apps:
context["bar_app"] = apps[0]
apps = PosApp.objects.filter(enabled=True, layout__item_type=ItemType.TEAS)
if apps:
context["teas_app"] = apps[0]
context["apps"] = PosApp.objects.filter(enabled=True, layout_id=None)
context["bar_tickers"] = Ticker.objects.filter(bar=True)
context["main_tickers"] = Ticker.objects.filter(main=True)
context["is_bar"] = self.system == "bar"
context["urls"] = mark_safe(
json.dumps(
{
"ping": reverse("pos_ajax_ping"),
"items": reverse("pos_ajax_items"),
"start": reverse("pos_start"),
"redirect": reverse("pos_redirect", kwargs={"view": "xxxx", "person_id": "9999"}),
"event": reverse("pos_event_register", kwargs={"pk": "8888", "person_id": "9999"}),
"people": reverse("ajax-people"),
"adults": reverse("ajax-adults"),
"password": reverse("ajax-password"),
"dob": reverse("ajax-dob"),
"postCode": reverse("ajax-postcode"),
"setPin": reverse("ajax-set-pin"),
"transactions": reverse("pos_transactions"),
"transactionsPerson": reverse("pos_transactions_person", kwargs={"person_id": "9999"}),
"transactionsComp": reverse("pos_transactions_comp"),
"transactionsCash": reverse("pos_transactions_cash"),
}
)
)
context["rows"], _ = build_pos_array()
return context
def post(self, request, *args, **kwargs):
""" Write transaction to database"""
if request.is_ajax():
receipt = json.loads(request.body)
pay_record = receipt.pop()
creation_date = datetime.fromtimestamp(pay_record["stamp"] / 1000, tz=timezone.get_current_timezone())
system, terminal = read_cookie(request)
existing = Transaction.objects.filter(creation_date=creation_date, terminal=terminal)
if not existing:
try:
trans = create_transaction_from_receipt(
None,
pay_record["terminal"],
pay_record["layout_id"],
receipt,
pay_record["total"],
pay_record["people"],
pay_record["attended"],
creation_date=creation_date,
)
except PosServicesError:
return HttpResponse(status=500)
return HttpResponse(f"Saved;{trans[0]};{trans[1]}")
return HttpResponse(f"Exists;{existing[0].id};{existing[0].total}")
# should not get here - all posts are ajax
return redirect("pos_start")
@method_decorator(nocache_page, name="dispatch")
class PosRegisterView(IpadRequiredMixin, RegisterView):
""" When user is not registered we use the public
registration form and override some things """
template_name = "pos/register.html"
re_register = False
def get_initial(self):
initial = super(PosRegisterView, self).get_initial()
self.person = Person.objects.get(pk=self.request.session["person_id"])
initial["first_name"] = self.person.first_name
initial["last_name"] = self.person.last_name
return initial
def get_form_kwargs(self):
""" set form kwargs so the name fields are hidden """
kwargs = super().get_form_kwargs()
kwargs.update({"hide_name": True})
return kwargs
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context["buttons"] = [
Button("Back", "back", css_class="btn-success btn-lg"),
Button("Next", "register", css_class="btn-success btn-lg"),
]
return add_context(context, self.request)
def post(self, request, *args, **kwargs):
if "back" in request.POST:
return redirect("pos_start")
if self.re_register:
person = Person.objects.get(pk=self.request.session["person_id"])
person.unregister()
        return super().post(request, *args, **kwargs)
def get_success_url_name(self):
return "pos_register_token"
def get_failure_url_name(self):
return "pos_start"
@method_decorator(nocache_page, name="dispatch")
class PosRegisterTokenView(IpadRequiredMixin, RegisterTokenView):
"""
Get username, PIN and password
"""
template_name = "pos/register_token.html"
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
return add_context(context, self.request)
def get_success_url_name(self, **kwargs):
return "pos_consent_token"
def get_already_registered_url_name(self):
return "pos_password"
@method_decorator(nocache_page, name="dispatch")
class PosConsentView(IpadRequiredMixin, ConsentTokenView):
template_name = "pos/consent.html"
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context["pos"] = True
return add_context(context, self.request)
def get_success_url(self):
return reverse(menu_url(self.request))
@method_decorator(nocache_page, name="dispatch")
class VisitorBookView(IpadRequiredMixin, SingleTableView):
"""
List visitors book
This can be for all entries or for a specific person (not necessary the one logged in)
"""
model = VisitorBook
table_class = VisitorBookTable
template_name = "pos/visitor_book_pos.html"
table_pagination = {"per_page": 10}
id = None
all_entries = False
def get_table_data(self):
self.id = self.kwargs.get("person_id", None)
if self.all_entries:
qs = VisitorBook.objects.all()
else:
qs = VisitorBook.objects.filter(member_id=self.id)
return list(qs.order_by("-date", "-id").select_related("visitor").select_related("member__membership"))
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context["all_entries"] = self.all_entries
if self.id:
person = Person.objects.get(pk=self.id)
context["person"] = person
return add_context(context, self.request)
@method_decorator(nocache_page, name="dispatch")
class VisitorMenuView(IpadRequiredMixin, TemplateView):
template_name = "pos/visitor_menu.html"
form_class = VisitorForm
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
return add_context(context, self.request)
@method_decorator(nocache_page, name="dispatch")
class VisitorCreateView(IpadRequiredMixin, FormView):
"""
Add an adult or junior visitor to the visitor book
"""
template_name = "pos/visitor_create.html"
form_class = VisitorForm
junior = False
def get_form_kwargs(self):
kwargs = super().get_form_kwargs()
self.person = Person.objects.get(pk=self.request.session["person_id"])
# if not self.admin:
kwargs.update({"person_id": self.person.id, "junior": self.junior})
return kwargs
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context["has_existing"] = len(context["form"].fields["visitors"].choices) > 1
context["junior"] = self.junior
return add_context(context, self.request)
def form_invalid(self, form):
return super().form_invalid(form)
def form_valid(self, form):
member_id = form.cleaned_data["person_id"]
if not member_id:
member_id = self.request.session["person_id"]
VisitorCreateView.process_form(form, member_id)
return redirect("pos_visitors_person", person_id=member_id)
@staticmethod
def process_form(form, member_id):
""" Create book entry for visitor. Static so it can also be used by admin view """
visitor_id = form.cleaned_data.get("visitors")
first_name = form.cleaned_data.get("first_name", "")
last_name = form.cleaned_data.get("last_name", "")
junior = form.cleaned_data.get("junior", False)
fees = VisitorFees.objects.filter(year=Settings.current_year())
fee = 6
if fees:
fee = fees[0].junior_fee if junior else fees[0].adult_fee
if visitor_id == "0":
            # user entered name - check it's not a duplicate
existing_visitors = Visitor.objects.filter(first_name=first_name, last_name=last_name, junior=junior)
if existing_visitors.count() > 0:
visitor = existing_visitors[0]
else:
visitor = Visitor.objects.create(first_name=first_name, last_name=last_name, junior=junior)
visitor_id = visitor.id
else:
visitor = Visitor.objects.get(id=visitor_id)
entry = VisitorBook.objects.create(member_id=member_id, visitor_id=visitor_id, fee=fee, billed=False)
return f"Visitor: {visitor.fullname} Fee: {fee}"
@method_decorator(nocache_page, name="dispatch")
class LookupMemberView(IpadRequiredMixin, TemplateView):
template_name = "pos/lookup_member.html"
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
return add_context(context, self.request)
@method_decorator(nocache_page, name="dispatch")
class PosEventRegisterView(IpadRequiredMixin, EventRegisterView):
""" Override the standard event register form"""
template_name = "pos/event_register.html"
def get_template_names(self):
return ["pos/event_register.html"]
def get_link_buttons(self):
buttons = []
buttons.append(
LinkButton("Group seating", reverse(f"pos_event_seating", kwargs={"participant_id": self.participant.id}))
)
if self.participant.tickets > 1:
buttons.append(
LinkButton(
"Guest names", reverse(f"pos_event_guests", kwargs={"participant_id": self.participant.id})
)
)
return buttons
def post(self, request, *args, **kwargs):
self.person = Person.objects.get(id=self.request.session["person_id"])
        return super().post(request, *args, **kwargs)
def get_success_url(self):
if self.event.allow_groups:
if self.participant.tickets > 1:
return reverse("pos_event_guests", kwargs={"participant_id": self.participant.id})
else:
return reverse("pos_event_seating", kwargs={"participant_id": self.participant.id})
return reverse("pos_event_register", kwargs={"pk": self.event.id})
@method_decorator(nocache_page, name="dispatch")
class PosEventGuestsView(IpadRequiredMixin, EventGuestsView):
""" Override the guests view"""
template_name = "pos/event_guests.html"
def get_success_url(self):
return reverse("pos_event_seating", kwargs={"participant_id": self.participant.id})
@method_decorator(nocache_page, name="dispatch")
class PosEventSeatingView(IpadRequiredMixin, EventSeatingView):
""" Override the group seating view"""
template_name = "pos/event_seating.html"
def get_success_url(self):
return reverse("pos_start")
@nocache_page
def pos_redirect(request, view, person_id):
return redirect(view, person_id=person_id)
@nocache_page
def ajax_items(request):
""" Responds to ajax request for item list"""
if request.is_ajax() and request.method == "GET":
        data = {}
        data["items"] = serialize("json", Item.objects.all())
        data["colours"] = serialize("json", Colour.objects.all())
layouts = Layout.objects.all()
layout_dict = {}
for layout in layouts:
locs_dict = {}
locs = Location.objects.filter(layout_id=layout.id)
for loc in locs:
key = f"#btn{loc.row}{loc.col}"
if loc.col == 0:
locs_dict[key] = loc.description
else:
locs_dict[key] = loc.item_id
layout_dict[layout.id] = json.dumps(locs_dict)
dict["layouts"] = layout_dict
return JsonResponse(dict, safe=False)
return HttpResponseBadRequest
@nocache_page
def ajax_colours(request):
""" Responds to ajax request for item colours """
if request.is_ajax() and request.method == "GET":
data = serialize("json", Colour.objects.all())
return JsonResponse(data, safe=False)
    return HttpResponseBadRequest()
@nocache_page
def ajax_locations(request):
""" Return a dictionary of locations that have an associated item """
if request.is_ajax() and request.method == "GET":
        layout_id = request.GET.get("layout_id", None)
        if layout_id:
            locs = {}
            locations = Location.objects.filter(layout_id=layout_id)
            for loc in locations:
                key = f"#btn{loc.row}{loc.col}"
                if loc.col == 0:
                    locs[key] = loc.description
                else:
                    locs[key] = loc.item_id
            return JsonResponse(locs)
    return HttpResponseBadRequest()
@nocache_page
def ajax_ping(request):
""" responds to keep alive from pos"""
if request.is_ajax() and request.method == "POST":
terminal = request.POST.get("terminal", None)
if terminal:
records = PosPing.objects.filter(terminal=terminal)
if records:
records[0].time = timezone.now()
records[0].save()
else:
record = PosPing.objects.create(terminal=terminal, time=timezone.now())
return HttpResponse("OK")
return HttpResponse("Bad terminal")
    return HttpResponseBadRequest()
@nocache_page
def menu_url(request):
if request.session["app"].is_visitors_app():
return "pos_visitors_all"
return "pos_start_person"
def add_context(context, request, timeout=LONG_TIMEOUT):
id = request.session.get("person_id", None)
if id and int(id) != -1:
person = Person.objects.get(pk=id)
context["person_id"] = int(id)
context["person"] = person
context["full_name"] = person.fullname
if person.auth:
context["admin"] = person.auth.is_staff or person.auth.groups.filter(name="Pos").exists()
else:
context["person_id"] = -1
context["full_name"] = "Complimentary"
context["timeout_url"] = reverse("pos_start")
context["timeout"] = timeout
return context
def is_integer_string(s):
try:
int(s)
return True
except ValueError:
return False
|
ianastewart/cwltc-admin
|
pos/views/ipad_views.py
|
Python
|
mit
| 18,953
|
# -*- coding: utf-8 -*-
"""
Created on Mar 13, 2012
@author: moloch
Copyright 2012 Root the Box
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
----------------------------------------------------------------------------
This file contains handlers related to the file sharing functionality
"""
import logging
from models.FileUpload import FileUpload
from models.Team import Team
from libs.ValidationError import ValidationError
from libs.SecurityDecorators import authenticated
from .BaseHandlers import BaseHandler
from tornado.options import options
from builtins import str
MAX_UPLOADS = 5
class FileUploadHandler(BaseHandler):
""" Handles file shares for teams """
@authenticated
    def get(self, *args, **kwargs):
        """ Renders upload file page """
        if options.team_sharing:
            user = self.get_current_user()
self.render(
"file_upload/shared_files.html", errors=None, shares=user.team.files
)
else:
self.redirect("/404")
@authenticated
    def post(self, *args, **kwargs):
        """ Validates the upload form and saves the shared files """
        if options.team_sharing:
            user = self.get_current_user()
self.errors = []
shares = []
if user.team:
shares = user.team.files
if hasattr(self.request, "files"):
teams = []
if user.is_admin():
teamval = self.get_argument("team_uuid", "")
if teamval == "all":
teams = Team.all()
elif teamval != "":
teams = [Team.by_uuid(teamval)]
shares = Team.by_uuid(teamval).files
else:
teams = [user.team]
for team in teams:
for shared_file in self.request.files["files"][:MAX_UPLOADS]:
file_upload = self.create_file(team, shared_file)
if file_upload is not None:
self.event_manager.team_file_shared(user, team, file_upload)
if not len(self.errors):
if user.is_admin():
self.redirect("/admin/view/fileshare")
else:
self.redirect("/user/share/files")
else:
self.render(
"file_upload/shared_files.html",
errors=self.errors,
shares=shares,
)
else:
self.render(
"file_upload/shared_files.html",
errors=["No files in request"],
shares=shares,
)
else:
self.redirect("/404")
    def create_file(self, team, shared_file):
        """ Saves uploaded file """
        if options.team_sharing:
try:
file_upload = FileUpload(team_id=team.id)
file_upload.file_name = shared_file["filename"]
file_upload.data = shared_file["body"]
file_upload.description = self.get_argument("description", "")
self.dbsession.add(file_upload)
self.dbsession.commit()
return file_upload
except ValidationError as error:
self.errors.append(str(error))
else:
self.redirect("/404")
class FileDownloadHandler(BaseHandler):
""" Download shared files from here """
@authenticated
    def get(self, *args, **kwargs):
        """ Get a file and send it to the user """
        if options.team_sharing:
            user = self.get_current_user()
shared_file = FileUpload.by_uuid(self.get_argument("uuid", ""))
            if shared_file is not None and (
                user.is_admin() or shared_file in user.team.files
            ):
self.set_header("Content-Type", shared_file.content_type)
self.set_header("Content-Length", shared_file.byte_size)
self.set_header(
"Content-Disposition",
"attachment; filename=%s" % (shared_file.file_name),
)
self.write(shared_file.data)
else:
self.render("public/404.html")
else:
self.redirect("/404")
class FileDeleteHandler(BaseHandler):
""" Delete shared files """
@authenticated
def post(self, *args, **kwargs):
if options.team_sharing:
user = self.get_current_user()
shared_file = FileUpload.by_uuid(self.get_argument("uuid", ""))
            if shared_file is not None and (
                user.is_admin() or shared_file in user.team.files
            ):
                logging.info(
                    "%s deleted a shared file %s" % (user.handle, shared_file.uuid)
                )
                shared_file.delete_data()
                self.dbsession.delete(shared_file)
                self.dbsession.commit()
                if user.is_admin():
                    self.redirect("/admin/view/fileshare")
                else:
                    self.redirect("/user/share/files")
            else:
                self.redirect("/404")
else:
self.redirect("/404")
|
moloch--/RootTheBox
|
handlers/FileUploadHandlers.py
|
Python
|
apache-2.0
| 6,090
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import functools
import json
import sys
import mox
from testtools import matchers
import testscenarios
from oslo.config import cfg
from heat.engine import environment
from heat.common import exception
from heat.common import urlfetch
from heat.tests import fakes as test_fakes
from heat.tests.v1_1 import fakes
import heat.rpc.api as engine_api
import heat.db.api as db_api
from heat.common import identifier
from heat.common import template_format
from heat.engine import dependencies
from heat.engine import parser
from heat.engine.resource import _register_class
from heat.engine import service
from heat.engine.properties import Properties
from heat.engine import resource as res
from heat.engine.resources import instance as instances
from heat.engine.resources import nova_utils
from heat.engine import resource as rsrs
from heat.engine import watchrule
from heat.openstack.common import threadgroup
from heat.tests.common import HeatTestCase
from heat.tests import generic_resource as generic_rsrc
from heat.tests import utils
load_tests = testscenarios.load_tests_apply_scenarios
wp_template = '''
{
"AWSTemplateFormatVersion" : "2010-09-09",
"Description" : "WordPress",
"Parameters" : {
"KeyName" : {
"Description" : "KeyName",
"Type" : "String",
"Default" : "test"
}
},
"Resources" : {
"WebServer": {
"Type": "AWS::EC2::Instance",
"Properties": {
"ImageId" : "F17-x86_64-gold",
"InstanceType" : "m1.large",
"KeyName" : "test",
"UserData" : "wordpress"
}
}
}
}
'''
nested_alarm_template = '''
HeatTemplateFormatVersion: '2012-12-12'
Resources:
the_nested:
Type: AWS::CloudFormation::Stack
Properties:
TemplateURL: https://server.test/alarm.template
'''
alarm_template = '''
{
"AWSTemplateFormatVersion" : "2010-09-09",
"Description" : "alarming",
"Resources" : {
"service_alarm": {
"Type": "AWS::CloudWatch::Alarm",
"Properties": {
"EvaluationPeriods": "1",
"AlarmActions": [],
"AlarmDescription": "do the thing",
"Namespace": "dev/null",
"Period": "300",
"ComparisonOperator": "GreaterThanThreshold",
"Statistic": "SampleCount",
"Threshold": "2",
"MetricName": "ServiceFailure"
}
}
}
}
'''
policy_template = '''
{
"AWSTemplateFormatVersion" : "2010-09-09",
"Description" : "alarming",
"Resources" : {
"WebServerScaleDownPolicy" : {
"Type" : "AWS::AutoScaling::ScalingPolicy",
"Properties" : {
"AdjustmentType" : "ChangeInCapacity",
"AutoScalingGroupName" : "",
"Cooldown" : "60",
"ScalingAdjustment" : "-1"
}
}
}
}
'''
def get_wordpress_stack(stack_name, ctx):
t = template_format.parse(wp_template)
template = parser.Template(t)
stack = parser.Stack(ctx, stack_name, template,
environment.Environment({'KeyName': 'test'}))
return stack
def get_stack(stack_name, ctx, template):
t = template_format.parse(template)
template = parser.Template(t)
stack = parser.Stack(ctx, stack_name, template)
return stack
def setup_keystone_mocks(mocks, stack):
fkc = test_fakes.FakeKeystoneClient()
mocks.StubOutWithMock(stack.clients, 'keystone')
stack.clients.keystone().MultipleTimes().AndReturn(fkc)
def setup_mocks(mocks, stack):
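    # Stub the nova client and its servers.create() call so stack.create()
    # can run without a real OpenStack endpoint.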
fc = fakes.FakeClient()
mocks.StubOutWithMock(instances.Instance, 'nova')
instances.Instance.nova().MultipleTimes().AndReturn(fc)
setup_keystone_mocks(mocks, stack)
instance = stack['WebServer']
user_data = instance.properties['UserData']
server_userdata = nova_utils.build_userdata(instance, user_data)
instance.mime_string = server_userdata
mocks.StubOutWithMock(fc.servers, 'create')
fc.servers.create(image=744, flavor=3, key_name='test',
name=utils.PhysName(stack.name, 'WebServer'),
security_groups=None,
userdata=server_userdata, scheduler_hints=None,
meta=None, nics=None,
availability_zone=None).AndReturn(
fc.servers.list()[-1])
return fc
def setup_stack(stack_name, ctx, create_res=True):
stack = get_wordpress_stack(stack_name, ctx)
stack.store()
if create_res:
m = mox.Mox()
setup_mocks(m, stack)
m.ReplayAll()
stack.create()
m.UnsetStubs()
return stack
def clean_up_stack(stack, delete_res=True):
if delete_res:
m = mox.Mox()
fc = fakes.FakeClient()
m.StubOutWithMock(instances.Instance, 'nova')
instances.Instance.nova().MultipleTimes().AndReturn(fc)
m.StubOutWithMock(fc.client, 'get_servers_9999')
get = fc.client.get_servers_9999
get().AndRaise(service.clients.novaclient.exceptions.NotFound(404))
m.ReplayAll()
stack.delete()
if delete_res:
m.UnsetStubs()
def stack_context(stack_name, create_res=True):
"""
Decorator which creates a stack by using the test case's context and
deletes it afterwards to ensure tests clean up their stacks regardless
of test success/failure
"""
def stack_delete(test_fn):
@functools.wraps(test_fn)
def wrapped_test(test_case, *args, **kwargs):
def create_stack():
ctx = getattr(test_case, 'ctx', None)
if ctx is not None:
stack = setup_stack(stack_name, ctx, create_res)
setattr(test_case, 'stack', stack)
def delete_stack():
stack = getattr(test_case, 'stack', None)
if stack is not None and stack.id is not None:
clean_up_stack(stack, delete_res=create_res)
create_stack()
try:
test_fn(test_case, *args, **kwargs)
except:
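                # Clean up the stack, then re-raise with the original
                # traceback (Python 2 three-argument raise syntax).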
exc_class, exc_val, exc_tb = sys.exc_info()
try:
delete_stack()
finally:
raise exc_class, exc_val, exc_tb
else:
delete_stack()
return wrapped_test
return stack_delete
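# Illustrative usage (mirrors the real usages further below; the decorated
# test receives its pre-created stack via ``self.stack``):
#     @stack_context('my_test_stack')
#     def test_something(self):
#         self.assertTrue(self.stack.id)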
class DummyThreadGroup(object):
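    """Stand-in ThreadGroup that records callbacks instead of running them."""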
def __init__(self):
self.threads = []
def add_timer(self, interval, callback, initial_delay=None,
*args, **kwargs):
self.threads.append(callback)
def add_thread(self, callback, *args, **kwargs):
self.threads.append(callback)
def stop(self):
pass
def wait(self):
pass
class StackCreateTest(HeatTestCase):
def setUp(self):
super(StackCreateTest, self).setUp()
utils.setup_dummy_db()
def test_wordpress_single_instance_stack_create(self):
stack = get_wordpress_stack('test_stack', utils.dummy_context())
setup_mocks(self.m, stack)
self.m.ReplayAll()
stack.store()
stack.create()
self.assertNotEqual(stack['WebServer'], None)
self.assertTrue(stack['WebServer'].resource_id > 0)
self.assertNotEqual(stack['WebServer'].ipaddress, '0.0.0.0')
def test_wordpress_single_instance_stack_delete(self):
ctx = utils.dummy_context()
stack = get_wordpress_stack('test_stack', ctx)
fc = setup_mocks(self.m, stack)
self.m.ReplayAll()
stack_id = stack.store()
stack.create()
db_s = db_api.stack_get(ctx, stack_id)
self.assertNotEqual(db_s, None)
self.assertNotEqual(stack['WebServer'], None)
self.assertTrue(stack['WebServer'].resource_id > 0)
self.m.StubOutWithMock(fc.client, 'get_servers_9999')
get = fc.client.get_servers_9999
get().AndRaise(service.clients.novaclient.exceptions.NotFound(404))
mox.Replay(get)
stack.delete()
rsrc = stack['WebServer']
self.assertEqual((rsrc.DELETE, rsrc.COMPLETE), rsrc.state)
        self.assertEqual((stack.DELETE, stack.COMPLETE), stack.state)
self.assertEqual(None, db_api.stack_get(ctx, stack_id))
self.assertEqual('DELETE', db_s.action)
        self.assertEqual('COMPLETE', db_s.status)
class StackServiceCreateUpdateDeleteTest(HeatTestCase):
def setUp(self):
super(StackServiceCreateUpdateDeleteTest, self).setUp()
utils.setup_dummy_db()
utils.reset_dummy_db()
self.ctx = utils.dummy_context()
self.man = service.EngineService('a-host', 'a-topic')
def _test_stack_create(self, stack_name):
params = {'foo': 'bar'}
template = '{ "Template": "data" }'
stack = get_wordpress_stack(stack_name, self.ctx)
self.m.StubOutWithMock(parser, 'Template')
self.m.StubOutWithMock(environment, 'Environment')
self.m.StubOutWithMock(parser, 'Stack')
parser.Template(template, files=None).AndReturn(stack.t)
environment.Environment(params).AndReturn(stack.env)
parser.Stack(self.ctx, stack.name,
stack.t, stack.env).AndReturn(stack)
self.m.StubOutWithMock(stack, 'validate')
stack.validate().AndReturn(None)
self.m.StubOutWithMock(threadgroup, 'ThreadGroup')
threadgroup.ThreadGroup().AndReturn(DummyThreadGroup())
self.m.ReplayAll()
result = self.man.create_stack(self.ctx, stack_name,
template, params, None, {})
self.assertEqual(stack.identifier(), result)
self.assertTrue(isinstance(result, dict))
self.assertTrue(result['stack_id'])
self.m.VerifyAll()
def test_stack_create(self):
stack_name = 'service_create_test_stack'
self._test_stack_create(stack_name)
def test_stack_create_equals_max_per_tenant(self):
cfg.CONF.set_override('max_stacks_per_tenant', 1)
stack_name = 'service_create_test_stack_equals_max'
self._test_stack_create(stack_name)
def test_stack_create_exceeds_max_per_tenant(self):
cfg.CONF.set_override('max_stacks_per_tenant', 0)
stack_name = 'service_create_test_stack_exceeds_max'
exc = self.assertRaises(exception.RequestLimitExceeded,
self._test_stack_create, stack_name)
self.assertIn("You have reached the maximum stacks per tenant",
str(exc))
def test_stack_create_verify_err(self):
stack_name = 'service_create_verify_err_test_stack'
params = {'foo': 'bar'}
template = '{ "Template": "data" }'
stack = get_wordpress_stack(stack_name, self.ctx)
self.m.StubOutWithMock(parser, 'Template')
self.m.StubOutWithMock(environment, 'Environment')
self.m.StubOutWithMock(parser, 'Stack')
parser.Template(template, files=None).AndReturn(stack.t)
environment.Environment(params).AndReturn(stack.env)
parser.Stack(self.ctx, stack.name,
stack.t,
stack.env).AndReturn(stack)
self.m.StubOutWithMock(stack, 'validate')
stack.validate().AndRaise(exception.StackValidationFailed(
message='fubar'))
self.m.ReplayAll()
self.assertRaises(
exception.StackValidationFailed,
self.man.create_stack,
self.ctx, stack_name,
template, params, None, {})
self.m.VerifyAll()
def test_stack_create_invalid_stack_name(self):
stack_name = 'service_create/test_stack'
stack = get_wordpress_stack('test_stack', self.ctx)
self.assertRaises(ValueError,
self.man.create_stack,
self.ctx, stack_name, stack.t, {}, None, {})
def test_stack_create_invalid_resource_name(self):
stack_name = 'service_create_test_stack_invalid_res'
stack = get_wordpress_stack(stack_name, self.ctx)
tmpl = dict(stack.t)
tmpl['Resources']['Web/Server'] = tmpl['Resources']['WebServer']
del tmpl['Resources']['WebServer']
self.assertRaises(ValueError,
self.man.create_stack,
self.ctx, stack_name,
stack.t, {}, None, {})
def test_stack_create_no_credentials(self):
stack_name = 'test_stack_create_no_credentials'
params = {'foo': 'bar'}
template = '{ "Template": "data" }'
stack = get_wordpress_stack(stack_name, self.ctx)
# force check for credentials on create
stack['WebServer'].requires_deferred_auth = True
self.m.StubOutWithMock(parser, 'Template')
self.m.StubOutWithMock(environment, 'Environment')
self.m.StubOutWithMock(parser, 'Stack')
ctx_no_pwd = utils.dummy_context(password=None)
ctx_no_user = utils.dummy_context(user=None)
parser.Template(template, files=None).AndReturn(stack.t)
environment.Environment(params).AndReturn(stack.env)
parser.Stack(ctx_no_pwd, stack.name,
stack.t, stack.env).AndReturn(stack)
parser.Template(template, files=None).AndReturn(stack.t)
environment.Environment(params).AndReturn(stack.env)
parser.Stack(ctx_no_user, stack.name,
stack.t, stack.env).AndReturn(stack)
self.m.ReplayAll()
ex = self.assertRaises(exception.MissingCredentialError,
self.man.create_stack,
ctx_no_pwd, stack_name,
template, params, None, {})
self.assertEqual(
'Missing required credential: X-Auth-Key', str(ex))
ex = self.assertRaises(exception.MissingCredentialError,
self.man.create_stack,
ctx_no_user, stack_name,
template, params, None, {})
self.assertEqual(
'Missing required credential: X-Auth-User', str(ex))
def test_stack_create_total_resources_equals_max(self):
stack_name = 'service_create_stack_total_resources_equals_max'
params = {}
res._register_class('GenericResourceType',
generic_rsrc.GenericResource)
tpl = {'Resources': {
'A': {'Type': 'GenericResourceType'},
'B': {'Type': 'GenericResourceType'},
'C': {'Type': 'GenericResourceType'}}}
template = parser.Template(tpl)
stack = parser.Stack(self.ctx, stack_name, template,
environment.Environment({}))
self.m.StubOutWithMock(parser, 'Template')
self.m.StubOutWithMock(environment, 'Environment')
self.m.StubOutWithMock(parser, 'Stack')
parser.Template(template, files=None).AndReturn(stack.t)
environment.Environment(params).AndReturn(stack.env)
parser.Stack(self.ctx, stack.name,
stack.t,
stack.env).AndReturn(stack)
self.m.ReplayAll()
cfg.CONF.set_override('max_resources_per_stack', 3)
result = self.man.create_stack(self.ctx, stack_name, template, params,
None, {})
self.m.VerifyAll()
self.assertEqual(stack.identifier(), result)
self.assertEqual(3, stack.total_resources())
stack.delete()
def test_stack_create_total_resources_exceeds_max(self):
stack_name = 'service_create_stack_total_resources_exceeds_max'
params = {}
res._register_class('GenericResourceType',
generic_rsrc.GenericResource)
tpl = {'Resources': {
'A': {'Type': 'GenericResourceType'},
'B': {'Type': 'GenericResourceType'},
'C': {'Type': 'GenericResourceType'}}}
template = parser.Template(tpl)
cfg.CONF.set_override('max_resources_per_stack', 2)
ex = self.assertRaises(exception.RequestLimitExceeded,
self.man.create_stack, self.ctx, stack_name,
template, params, None, {})
self.assertIn(exception.StackResourceLimitExceeded.msg_fmt,
str(ex))
def test_stack_validate(self):
stack_name = 'service_create_test_validate'
stack = get_wordpress_stack(stack_name, self.ctx)
setup_mocks(self.m, stack)
template = dict(stack.t)
template['Parameters']['KeyName']['Default'] = 'test'
resource = stack['WebServer']
self.m.ReplayAll()
resource.properties = Properties(
resource.properties_schema,
{
'ImageId': 'CentOS 5.2',
'KeyName': 'test',
'InstanceType': 'm1.large'
})
stack.validate()
resource.properties = Properties(
resource.properties_schema,
{
'KeyName': 'test',
'InstanceType': 'm1.large'
})
self.assertRaises(exception.StackValidationFailed, stack.validate)
def test_stack_delete(self):
stack_name = 'service_delete_test_stack'
stack = get_wordpress_stack(stack_name, self.ctx)
sid = stack.store()
s = db_api.stack_get(self.ctx, sid)
self.m.StubOutWithMock(parser.Stack, 'load')
parser.Stack.load(self.ctx, stack=s).AndReturn(stack)
self.man.tg = DummyThreadGroup()
self.m.ReplayAll()
self.assertEqual(None,
self.man.delete_stack(self.ctx, stack.identifier()))
self.m.VerifyAll()
def test_stack_delete_nonexist(self):
stack_name = 'service_delete_nonexist_test_stack'
stack = get_wordpress_stack(stack_name, self.ctx)
self.m.ReplayAll()
self.assertRaises(exception.StackNotFound,
self.man.delete_stack,
self.ctx, stack.identifier())
self.m.VerifyAll()
def test_stack_update(self):
stack_name = 'service_update_test_stack'
params = {'foo': 'bar'}
template = '{ "Template": "data" }'
old_stack = get_wordpress_stack(stack_name, self.ctx)
sid = old_stack.store()
s = db_api.stack_get(self.ctx, sid)
stack = get_wordpress_stack(stack_name, self.ctx)
self.m.StubOutWithMock(parser, 'Stack')
self.m.StubOutWithMock(parser.Stack, 'load')
parser.Stack.load(self.ctx, stack=s).AndReturn(old_stack)
self.m.StubOutWithMock(parser, 'Template')
self.m.StubOutWithMock(environment, 'Environment')
parser.Template(template, files=None).AndReturn(stack.t)
environment.Environment(params).AndReturn(stack.env)
parser.Stack(self.ctx, stack.name,
stack.t, stack.env).AndReturn(stack)
self.m.StubOutWithMock(stack, 'validate')
stack.validate().AndReturn(None)
self.m.StubOutWithMock(threadgroup, 'ThreadGroup')
threadgroup.ThreadGroup().AndReturn(DummyThreadGroup())
self.m.ReplayAll()
result = self.man.update_stack(self.ctx, old_stack.identifier(),
template, params, None, {})
self.assertEqual(old_stack.identifier(), result)
self.assertTrue(isinstance(result, dict))
self.assertTrue(result['stack_id'])
self.m.VerifyAll()
def test_stack_update_equals(self):
stack_name = 'test_stack_update_equals_resource_limit'
params = {}
res._register_class('GenericResourceType',
generic_rsrc.GenericResource)
tpl = {'Resources': {
'A': {'Type': 'GenericResourceType'},
'B': {'Type': 'GenericResourceType'},
'C': {'Type': 'GenericResourceType'}}}
template = parser.Template(tpl)
old_stack = parser.Stack(self.ctx, stack_name, template)
sid = old_stack.store()
s = db_api.stack_get(self.ctx, sid)
stack = parser.Stack(self.ctx, stack_name, template)
self.m.StubOutWithMock(parser, 'Stack')
self.m.StubOutWithMock(parser.Stack, 'load')
parser.Stack.load(self.ctx, stack=s).AndReturn(old_stack)
self.m.StubOutWithMock(parser, 'Template')
self.m.StubOutWithMock(environment, 'Environment')
parser.Template(template, files=None).AndReturn(stack.t)
environment.Environment(params).AndReturn(stack.env)
parser.Stack(self.ctx, stack.name,
stack.t, stack.env).AndReturn(stack)
self.m.StubOutWithMock(stack, 'validate')
stack.validate().AndReturn(None)
self.m.StubOutWithMock(threadgroup, 'ThreadGroup')
threadgroup.ThreadGroup().AndReturn(DummyThreadGroup())
self.m.ReplayAll()
cfg.CONF.set_override('max_resources_per_stack', 3)
result = self.man.update_stack(self.ctx, old_stack.identifier(),
template, params, None, {})
self.assertEqual(old_stack.identifier(), result)
self.assertTrue(isinstance(result, dict))
self.assertTrue(result['stack_id'])
self.assertEqual(3, old_stack.root_stack.total_resources())
self.m.VerifyAll()
def test_stack_update_exceeds_resource_limit(self):
stack_name = 'test_stack_update_exceeds_resource_limit'
params = {}
res._register_class('GenericResourceType',
generic_rsrc.GenericResource)
tpl = {'Resources': {
'A': {'Type': 'GenericResourceType'},
'B': {'Type': 'GenericResourceType'},
'C': {'Type': 'GenericResourceType'}}}
template = parser.Template(tpl)
old_stack = parser.Stack(self.ctx, stack_name, template)
sid = old_stack.store()
s = db_api.stack_get(self.ctx, sid)
cfg.CONF.set_override('max_resources_per_stack', 2)
ex = self.assertRaises(exception.RequestLimitExceeded,
self.man.update_stack, self.ctx,
old_stack.identifier(), template, params, None,
{})
self.assertIn(exception.StackResourceLimitExceeded.msg_fmt,
str(ex))
def test_stack_update_verify_err(self):
stack_name = 'service_update_verify_err_test_stack'
params = {'foo': 'bar'}
template = '{ "Template": "data" }'
old_stack = get_wordpress_stack(stack_name, self.ctx)
old_stack.store()
sid = old_stack.store()
s = db_api.stack_get(self.ctx, sid)
stack = get_wordpress_stack(stack_name, self.ctx)
self.m.StubOutWithMock(parser, 'Stack')
self.m.StubOutWithMock(parser.Stack, 'load')
parser.Stack.load(self.ctx, stack=s).AndReturn(old_stack)
self.m.StubOutWithMock(parser, 'Template')
self.m.StubOutWithMock(environment, 'Environment')
parser.Template(template, files=None).AndReturn(stack.t)
environment.Environment(params).AndReturn(stack.env)
parser.Stack(self.ctx, stack.name,
stack.t, stack.env).AndReturn(stack)
self.m.StubOutWithMock(stack, 'validate')
stack.validate().AndRaise(exception.StackValidationFailed(
message='fubar'))
self.m.ReplayAll()
self.assertRaises(
exception.StackValidationFailed,
self.man.update_stack,
self.ctx, old_stack.identifier(),
template, params, None, {})
self.m.VerifyAll()
def test_stack_update_nonexist(self):
stack_name = 'service_update_nonexist_test_stack'
params = {'foo': 'bar'}
template = '{ "Template": "data" }'
stack = get_wordpress_stack(stack_name, self.ctx)
self.m.ReplayAll()
self.assertRaises(exception.StackNotFound,
self.man.update_stack,
self.ctx, stack.identifier(), template, params,
None, {})
self.m.VerifyAll()
def test_stack_update_no_credentials(self):
stack_name = 'test_stack_update_no_credentials'
params = {'foo': 'bar'}
template = '{ "Template": "data" }'
old_stack = get_wordpress_stack(stack_name, self.ctx)
# force check for credentials on create
old_stack['WebServer'].requires_deferred_auth = True
sid = old_stack.store()
s = db_api.stack_get(self.ctx, sid)
self.ctx = utils.dummy_context(password=None)
self.m.StubOutWithMock(parser, 'Stack')
self.m.StubOutWithMock(parser.Stack, 'load')
self.m.StubOutWithMock(parser, 'Template')
self.m.StubOutWithMock(environment, 'Environment')
self.m.StubOutWithMock(self.man, '_get_stack')
self.man._get_stack(self.ctx, old_stack.identifier()).AndReturn(s)
parser.Stack.load(self.ctx, stack=s).AndReturn(old_stack)
parser.Template(template, files=None).AndReturn(old_stack.t)
environment.Environment(params).AndReturn(old_stack.env)
parser.Stack(self.ctx, old_stack.name,
old_stack.t, old_stack.env).AndReturn(old_stack)
self.m.ReplayAll()
ex = self.assertRaises(exception.MissingCredentialError,
self.man.update_stack, self.ctx,
old_stack.identifier(),
template, params, None, {})
self.assertEqual(
'Missing required credential: X-Auth-Key', str(ex))
self.m.VerifyAll()
def test_validate_deferred_auth_context_trusts(self):
stack = get_wordpress_stack('test_deferred_auth', self.ctx)
stack['WebServer'].requires_deferred_auth = True
ctx = utils.dummy_context(user=None, password=None)
cfg.CONF.set_default('deferred_auth_method', 'trusts')
# using trusts, no username or password required
self.man._validate_deferred_auth_context(ctx, stack)
def test_validate_deferred_auth_context_not_required(self):
stack = get_wordpress_stack('test_deferred_auth', self.ctx)
stack['WebServer'].requires_deferred_auth = False
ctx = utils.dummy_context(user=None, password=None)
cfg.CONF.set_default('deferred_auth_method', 'password')
# stack performs no deferred operations, so no username or
# password required
self.man._validate_deferred_auth_context(ctx, stack)
def test_validate_deferred_auth_context_missing_credentials(self):
stack = get_wordpress_stack('test_deferred_auth', self.ctx)
stack['WebServer'].requires_deferred_auth = True
cfg.CONF.set_default('deferred_auth_method', 'password')
# missing username
ctx = utils.dummy_context(user=None)
ex = self.assertRaises(exception.MissingCredentialError,
self.man._validate_deferred_auth_context,
ctx, stack)
self.assertEqual(
'Missing required credential: X-Auth-User', str(ex))
# missing password
ctx = utils.dummy_context(password=None)
ex = self.assertRaises(exception.MissingCredentialError,
self.man._validate_deferred_auth_context,
ctx, stack)
self.assertEqual(
'Missing required credential: X-Auth-Key', str(ex))
class StackServiceUpdateNotSupportedTest(HeatTestCase):
scenarios = [
('suspend_in_progress', dict(action='SUSPEND', status='IN_PROGRESS')),
('suspend_complete', dict(action='SUSPEND', status='COMPLETE')),
('suspend_failed', dict(action='SUSPEND', status='FAILED')),
('create_in_progress', dict(action='CREATE', status='IN_PROGRESS')),
('delete_in_progress', dict(action='DELETE', status='IN_PROGRESS')),
('update_in_progress', dict(action='UPDATE', status='IN_PROGRESS')),
('rb_in_progress', dict(action='ROLLBACK', status='IN_PROGRESS')),
('resume_in_progress', dict(action='RESUME', status='IN_PROGRESS')),
]
def setUp(self):
super(StackServiceUpdateNotSupportedTest, self).setUp()
utils.setup_dummy_db()
utils.reset_dummy_db()
self.ctx = utils.dummy_context()
self.man = service.EngineService('a-host', 'a-topic')
def test_stack_update_during(self):
stack_name = '%s-%s' % (self.action, self.status)
old_stack = get_wordpress_stack(stack_name, self.ctx)
old_stack.action = self.action
old_stack.status = self.status
sid = old_stack.store()
s = db_api.stack_get(self.ctx, sid)
stack = get_wordpress_stack(stack_name, self.ctx)
self.m.StubOutWithMock(parser, 'Stack')
self.m.StubOutWithMock(parser.Stack, 'load')
parser.Stack.load(self.ctx, stack=s).AndReturn(old_stack)
self.m.ReplayAll()
params = {'foo': 'bar'}
template = '{ "Resources": {} }'
self.assertRaises(exception.NotSupported,
self.man.update_stack,
self.ctx, old_stack.identifier(), template, params,
None, {})
self.m.VerifyAll()
class StackServiceSuspendResumeTest(HeatTestCase):
def setUp(self):
super(StackServiceSuspendResumeTest, self).setUp()
utils.setup_dummy_db()
self.ctx = utils.dummy_context()
self.man = service.EngineService('a-host', 'a-topic')
def test_stack_suspend(self):
stack_name = 'service_suspend_test_stack'
stack = get_wordpress_stack(stack_name, self.ctx)
sid = stack.store()
s = db_api.stack_get(self.ctx, sid)
self.m.StubOutWithMock(parser.Stack, 'load')
parser.Stack.load(self.ctx, stack=s).AndReturn(stack)
self.m.StubOutWithMock(service.EngineService, '_start_in_thread')
service.EngineService._start_in_thread(sid,
mox.IgnoreArg(),
stack).AndReturn(None)
self.m.ReplayAll()
result = self.man.stack_suspend(self.ctx, stack.identifier())
self.assertEqual(None, result)
self.m.VerifyAll()
@stack_context('service_resume_test_stack', False)
def test_stack_resume(self):
self.m.StubOutWithMock(parser.Stack, 'load')
parser.Stack.load(self.ctx,
stack=mox.IgnoreArg()).AndReturn(self.stack)
self.m.StubOutWithMock(service.EngineService, '_start_in_thread')
service.EngineService._start_in_thread(self.stack.id,
mox.IgnoreArg(),
self.stack).AndReturn(None)
self.m.ReplayAll()
result = self.man.stack_resume(self.ctx, self.stack.identifier())
self.assertEqual(None, result)
self.m.VerifyAll()
def test_stack_suspend_nonexist(self):
stack_name = 'service_suspend_nonexist_test_stack'
stack = get_wordpress_stack(stack_name, self.ctx)
self.m.ReplayAll()
self.assertRaises(exception.StackNotFound,
self.man.stack_suspend, self.ctx, stack.identifier())
self.m.VerifyAll()
def test_stack_resume_nonexist(self):
stack_name = 'service_resume_nonexist_test_stack'
stack = get_wordpress_stack(stack_name, self.ctx)
self.m.ReplayAll()
self.assertRaises(exception.StackNotFound,
self.man.stack_resume, self.ctx, stack.identifier())
self.m.VerifyAll()
class StackServiceTest(HeatTestCase):
def setUp(self):
super(StackServiceTest, self).setUp()
self.ctx = utils.dummy_context(tenant_id='stack_service_test_tenant')
self.eng = service.EngineService('a-host', 'a-topic')
cfg.CONF.set_default('heat_stack_user_role', 'stack_user_role')
_register_class('ResourceWithPropsType',
generic_rsrc.ResourceWithProps)
utils.setup_dummy_db()
@stack_context('service_identify_test_stack', False)
def test_stack_identify(self):
self.m.StubOutWithMock(parser.Stack, 'load')
parser.Stack.load(self.ctx,
stack=mox.IgnoreArg()).AndReturn(self.stack)
self.m.ReplayAll()
identity = self.eng.identify_stack(self.ctx, self.stack.name)
self.assertEqual(self.stack.identifier(), identity)
self.m.VerifyAll()
@stack_context('service_identify_uuid_test_stack', False)
def test_stack_identify_uuid(self):
self.m.StubOutWithMock(parser.Stack, 'load')
parser.Stack.load(self.ctx,
stack=mox.IgnoreArg()).AndReturn(self.stack)
self.m.ReplayAll()
identity = self.eng.identify_stack(self.ctx, self.stack.id)
self.assertEqual(self.stack.identifier(), identity)
self.m.VerifyAll()
def test_stack_identify_nonexist(self):
self.assertRaises(exception.StackNotFound, self.eng.identify_stack,
self.ctx, 'wibble')
@stack_context('service_create_existing_test_stack', False)
def test_stack_create_existing(self):
self.assertRaises(exception.StackExists, self.eng.create_stack,
self.ctx, self.stack.name, self.stack.t, {},
None, {})
@stack_context('service_name_tenants_test_stack', False)
def test_stack_by_name_tenants(self):
self.assertEqual(self.stack.id,
db_api.stack_get_by_name(self.ctx,
self.stack.name).id)
ctx2 = utils.dummy_context(tenant_id='stack_service_test_tenant2')
self.assertEqual(None, db_api.stack_get_by_name(ctx2, self.stack.name))
@stack_context('service_event_list_test_stack')
def test_stack_event_list(self):
self.m.StubOutWithMock(service.EngineService, '_get_stack')
s = db_api.stack_get(self.ctx, self.stack.id)
service.EngineService._get_stack(self.ctx,
self.stack.identifier(),
show_deleted=True).AndReturn(s)
self.m.ReplayAll()
events = self.eng.list_events(self.ctx, self.stack.identifier())
self.assertEqual(2, len(events))
for ev in events:
self.assertTrue('event_identity' in ev)
self.assertEqual(dict, type(ev['event_identity']))
self.assertTrue(ev['event_identity']['path'].rsplit('/', 1)[1])
self.assertTrue('resource_name' in ev)
self.assertEqual('WebServer', ev['resource_name'])
self.assertTrue('physical_resource_id' in ev)
self.assertTrue('resource_properties' in ev)
# Big long user data field.. it mentions 'wordpress'
# a few times so this should work.
user_data = ev['resource_properties']['UserData']
self.assertNotEqual(user_data.find('wordpress'), -1)
self.assertEqual('F17-x86_64-gold',
ev['resource_properties']['ImageId'])
self.assertEqual('m1.large',
ev['resource_properties']['InstanceType'])
self.assertEqual('CREATE', ev['resource_action'])
self.assertTrue(ev['resource_status'] in ('IN_PROGRESS',
'COMPLETE'))
self.assertTrue('resource_status_reason' in ev)
self.assertEqual('state changed', ev['resource_status_reason'])
self.assertTrue('resource_type' in ev)
self.assertEqual('AWS::EC2::Instance', ev['resource_type'])
self.assertTrue('stack_identity' in ev)
self.assertTrue('stack_name' in ev)
self.assertEqual(self.stack.name, ev['stack_name'])
self.assertTrue('event_time' in ev)
self.m.VerifyAll()
@stack_context('event_list_deleted_stack')
def test_stack_event_list_deleted_resource(self):
rsrs._register_class('GenericResourceType',
generic_rsrc.GenericResource)
def run(stack_id, func, *args):
func(*args)
self.eng._start_in_thread = run
new_tmpl = {'Resources': {'AResource': {'Type':
'GenericResourceType'}}}
self.m.StubOutWithMock(instances.Instance, 'handle_delete')
instances.Instance.handle_delete()
self.m.ReplayAll()
result = self.eng.update_stack(self.ctx, self.stack.identifier(),
new_tmpl, None, None, {})
# The self.stack reference needs to be updated. Since the underlying
# stack is updated in update_stack, the original reference is now
# pointing to an orphaned stack object.
self.stack = parser.Stack.load(self.ctx, stack_id=result['stack_id'])
self.assertEqual(self.stack.identifier(), result)
self.assertTrue(isinstance(result, dict))
self.assertTrue(result['stack_id'])
events = self.eng.list_events(self.ctx, self.stack.identifier())
self.assertEqual(6, len(events))
for ev in events:
self.assertIn('event_identity', ev)
self.assertEqual(dict, type(ev['event_identity']))
self.assertTrue(ev['event_identity']['path'].rsplit('/', 1)[1])
self.assertIn('resource_name', ev)
self.assertIn('physical_resource_id', ev)
self.assertIn('resource_properties', ev)
self.assertIn('resource_status_reason', ev)
self.assertIn(ev['resource_action'], ('CREATE', 'DELETE'))
self.assertIn(ev['resource_status'], ('IN_PROGRESS', 'COMPLETE'))
self.assertIn('resource_type', ev)
self.assertIn(ev['resource_type'], ('AWS::EC2::Instance',
'GenericResourceType'))
self.assertIn('stack_identity', ev)
self.assertIn('stack_name', ev)
self.assertEqual(self.stack.name, ev['stack_name'])
self.assertIn('event_time', ev)
self.m.VerifyAll()
@stack_context('service_event_list_test_stack')
def test_stack_event_list_by_tenant(self):
events = self.eng.list_events(self.ctx, None)
self.assertEqual(2, len(events))
for ev in events:
self.assertIn('event_identity', ev)
self.assertThat(ev['event_identity'], matchers.IsInstance(dict))
self.assertTrue(ev['event_identity']['path'].rsplit('/', 1)[1])
self.assertTrue('resource_name' in ev)
self.assertEqual('WebServer', ev['resource_name'])
self.assertTrue('physical_resource_id' in ev)
self.assertTrue('resource_properties' in ev)
# Big long user data field.. it mentions 'wordpress'
# a few times so this should work.
user_data = ev['resource_properties']['UserData']
self.assertIn('wordpress', user_data)
self.assertEqual('F17-x86_64-gold',
ev['resource_properties']['ImageId'])
self.assertEqual('m1.large',
ev['resource_properties']['InstanceType'])
self.assertEqual('CREATE', ev['resource_action'])
self.assertIn(ev['resource_status'], ('IN_PROGRESS', 'COMPLETE'))
self.assertIn('resource_status_reason', ev)
self.assertEqual('state changed', ev['resource_status_reason'])
self.assertIn('resource_type', ev)
self.assertEqual('AWS::EC2::Instance', ev['resource_type'])
self.assertIn('stack_identity', ev)
self.assertIn('stack_name', ev)
self.assertEqual(self.stack.name, ev['stack_name'])
self.assertIn('event_time', ev)
self.m.VerifyAll()
@stack_context('service_list_all_test_stack')
def test_stack_list_all(self):
self.m.StubOutWithMock(parser.Stack, 'load')
parser.Stack.load(self.ctx, stack=mox.IgnoreArg(), resolve_data=False)\
.AndReturn(self.stack)
self.m.ReplayAll()
sl = self.eng.list_stacks(self.ctx)
self.assertEqual(1, len(sl))
for s in sl:
self.assertTrue('creation_time' in s)
self.assertTrue('updated_time' in s)
self.assertTrue('stack_identity' in s)
self.assertNotEqual(s['stack_identity'], None)
self.assertTrue('stack_name' in s)
self.assertEqual(self.stack.name, s['stack_name'])
self.assertTrue('stack_status' in s)
self.assertTrue('stack_status_reason' in s)
self.assertTrue('description' in s)
self.assertNotEqual(s['description'].find('WordPress'), -1)
self.m.VerifyAll()
def test_stack_describe_nonexistent(self):
non_exist_identifier = identifier.HeatIdentifier(
self.ctx.tenant_id, 'wibble',
'18d06e2e-44d3-4bef-9fbf-52480d604b02')
stack_not_found_exc = exception.StackNotFound(stack_name='test')
self.m.StubOutWithMock(service.EngineService, '_get_stack')
service.EngineService._get_stack(
self.ctx, non_exist_identifier,
show_deleted=True).AndRaise(stack_not_found_exc)
self.m.ReplayAll()
self.assertRaises(exception.StackNotFound,
self.eng.show_stack,
self.ctx, non_exist_identifier)
self.m.VerifyAll()
def test_stack_describe_bad_tenant(self):
non_exist_identifier = identifier.HeatIdentifier(
'wibble', 'wibble',
'18d06e2e-44d3-4bef-9fbf-52480d604b02')
invalid_tenant_exc = exception.InvalidTenant(target='test',
actual='test')
self.m.StubOutWithMock(service.EngineService, '_get_stack')
service.EngineService._get_stack(
self.ctx, non_exist_identifier,
show_deleted=True).AndRaise(invalid_tenant_exc)
self.m.ReplayAll()
self.assertRaises(exception.InvalidTenant,
self.eng.show_stack,
self.ctx, non_exist_identifier)
self.m.VerifyAll()
@stack_context('service_describe_test_stack', False)
def test_stack_describe(self):
self.m.StubOutWithMock(service.EngineService, '_get_stack')
s = db_api.stack_get(self.ctx, self.stack.id)
service.EngineService._get_stack(self.ctx,
self.stack.identifier(),
show_deleted=True).AndReturn(s)
self.m.ReplayAll()
sl = self.eng.show_stack(self.ctx, self.stack.identifier())
self.assertEqual(1, len(sl))
s = sl[0]
self.assertTrue('creation_time' in s)
self.assertTrue('updated_time' in s)
self.assertTrue('stack_identity' in s)
self.assertNotEqual(s['stack_identity'], None)
self.assertTrue('stack_name' in s)
self.assertEqual(self.stack.name, s['stack_name'])
self.assertTrue('stack_status' in s)
self.assertTrue('stack_status_reason' in s)
self.assertTrue('description' in s)
self.assertNotEqual(s['description'].find('WordPress'), -1)
self.assertTrue('parameters' in s)
self.m.VerifyAll()
@stack_context('service_describe_all_test_stack', False)
def test_stack_describe_all(self):
sl = self.eng.show_stack(self.ctx, None)
self.assertEqual(1, len(sl))
s = sl[0]
self.assertTrue('creation_time' in s)
self.assertTrue('updated_time' in s)
self.assertTrue('stack_identity' in s)
self.assertNotEqual(s['stack_identity'], None)
self.assertTrue('stack_name' in s)
self.assertEqual(self.stack.name, s['stack_name'])
self.assertTrue('stack_status' in s)
self.assertTrue('stack_status_reason' in s)
self.assertTrue('description' in s)
self.assertNotEqual(s['description'].find('WordPress'), -1)
self.assertTrue('parameters' in s)
def test_list_resource_types(self):
resources = self.eng.list_resource_types(self.ctx)
self.assertTrue(isinstance(resources, list))
self.assertTrue('AWS::EC2::Instance' in resources)
def test_resource_schema(self):
type_name = 'ResourceWithPropsType'
expected = {
'resource_type': type_name,
'properties': {
'Foo': {
'type': 'string',
'required': False,
'update_allowed': False,
},
},
'attributes': {
'foo': {'description': 'A generic attribute'},
'Foo': {'description': 'Another generic attribute'},
},
}
schema = self.eng.resource_schema(self.ctx, type_name=type_name)
self.assertEqual(expected, schema)
def test_resource_schema_nonexist(self):
self.assertRaises(exception.ResourceTypeNotFound,
self.eng.resource_schema,
self.ctx, type_name='Bogus')
@stack_context('service_stack_resource_describe__test_stack')
def test_stack_resource_describe(self):
self.m.StubOutWithMock(parser.Stack, 'load')
parser.Stack.load(self.ctx,
stack=mox.IgnoreArg()).AndReturn(self.stack)
self.m.ReplayAll()
r = self.eng.describe_stack_resource(self.ctx, self.stack.identifier(),
'WebServer')
self.assertTrue('resource_identity' in r)
self.assertTrue('description' in r)
self.assertTrue('updated_time' in r)
self.assertTrue('stack_identity' in r)
self.assertNotEqual(r['stack_identity'], None)
self.assertTrue('stack_name' in r)
self.assertEqual(self.stack.name, r['stack_name'])
self.assertTrue('metadata' in r)
self.assertTrue('resource_status' in r)
self.assertTrue('resource_status_reason' in r)
self.assertTrue('resource_type' in r)
self.assertTrue('physical_resource_id' in r)
self.assertTrue('resource_name' in r)
self.assertEqual('WebServer', r['resource_name'])
self.m.VerifyAll()
def test_stack_resource_describe_nonexist_stack(self):
non_exist_identifier = identifier.HeatIdentifier(
self.ctx.tenant_id,
'wibble',
'18d06e2e-44d3-4bef-9fbf-52480d604b02')
stack_not_found_exc = exception.StackNotFound(stack_name='test')
self.m.StubOutWithMock(service.EngineService, '_get_stack')
service.EngineService._get_stack(
self.ctx, non_exist_identifier).AndRaise(stack_not_found_exc)
self.m.ReplayAll()
self.assertRaises(exception.StackNotFound,
self.eng.describe_stack_resource,
self.ctx, non_exist_identifier, 'WebServer')
self.m.VerifyAll()
@stack_context('service_resource_describe_nonexist_test_stack')
def test_stack_resource_describe_nonexist_resource(self):
self.m.StubOutWithMock(parser.Stack, 'load')
parser.Stack.load(self.ctx,
stack=mox.IgnoreArg()).AndReturn(self.stack)
self.m.ReplayAll()
self.assertRaises(exception.ResourceNotFound,
self.eng.describe_stack_resource,
self.ctx, self.stack.identifier(), 'foo')
self.m.VerifyAll()
@stack_context('service_resource_describe_user_deny_test_stack')
def test_stack_resource_describe_stack_user_deny(self):
self.ctx.roles = [cfg.CONF.heat_stack_user_role]
self.m.StubOutWithMock(service.EngineService, '_authorize_stack_user')
service.EngineService._authorize_stack_user(self.ctx, mox.IgnoreArg(),
'foo').AndReturn(False)
self.m.ReplayAll()
self.assertRaises(exception.Forbidden,
self.eng.describe_stack_resource,
self.ctx, self.stack.identifier(), 'foo')
self.m.VerifyAll()
@stack_context('service_authorize_stack_user_nocreds_test_stack')
def test_stack_authorize_stack_user_nocreds(self):
self.assertFalse(self.eng._authorize_stack_user(self.ctx,
self.stack,
'foo'))
@stack_context('service_authorize_user_attribute_error_test_stack')
def test_stack_authorize_stack_user_attribute_error(self):
self.m.StubOutWithMock(json, 'loads')
json.loads(None).AndRaise(AttributeError)
self.m.ReplayAll()
self.assertFalse(self.eng._authorize_stack_user(self.ctx,
self.stack,
'foo'))
self.m.VerifyAll()
@stack_context('service_authorize_stack_user_type_error_test_stack')
def test_stack_authorize_stack_user_type_error(self):
self.m.StubOutWithMock(json, 'loads')
json.loads(mox.IgnoreArg()).AndRaise(TypeError)
self.m.ReplayAll()
self.assertFalse(self.eng._authorize_stack_user(self.ctx,
self.stack,
'foo'))
self.m.VerifyAll()
@stack_context('service_resources_describe_test_stack')
def test_stack_resources_describe(self):
self.m.StubOutWithMock(parser.Stack, 'load')
parser.Stack.load(self.ctx,
stack=mox.IgnoreArg()).AndReturn(self.stack)
self.m.ReplayAll()
resources = self.eng.describe_stack_resources(self.ctx,
self.stack.identifier(),
'WebServer')
self.assertEqual(1, len(resources))
r = resources[0]
self.assertTrue('resource_identity' in r)
self.assertTrue('description' in r)
self.assertTrue('updated_time' in r)
self.assertTrue('stack_identity' in r)
self.assertNotEqual(r['stack_identity'], None)
self.assertTrue('stack_name' in r)
self.assertEqual(self.stack.name, r['stack_name'])
self.assertTrue('resource_status' in r)
self.assertTrue('resource_status_reason' in r)
self.assertTrue('resource_type' in r)
self.assertTrue('physical_resource_id' in r)
self.assertTrue('resource_name' in r)
self.assertEqual('WebServer', r['resource_name'])
self.m.VerifyAll()
@stack_context('service_resources_describe_no_filter_test_stack')
def test_stack_resources_describe_no_filter(self):
self.m.StubOutWithMock(parser.Stack, 'load')
parser.Stack.load(self.ctx,
stack=mox.IgnoreArg()).AndReturn(self.stack)
self.m.ReplayAll()
resources = self.eng.describe_stack_resources(self.ctx,
self.stack.identifier(),
None)
self.assertEqual(1, len(resources))
r = resources[0]
self.assertTrue('resource_name' in r)
self.assertEqual('WebServer', r['resource_name'])
self.m.VerifyAll()
def test_stack_resources_describe_bad_lookup(self):
self.m.StubOutWithMock(service.EngineService, '_get_stack')
service.EngineService._get_stack(
self.ctx, None).AndRaise(TypeError)
self.m.ReplayAll()
self.assertRaises(TypeError,
self.eng.describe_stack_resources,
self.ctx, None, 'WebServer')
self.m.VerifyAll()
def test_stack_resources_describe_nonexist_stack(self):
non_exist_identifier = identifier.HeatIdentifier(
self.ctx.tenant_id, 'wibble',
'18d06e2e-44d3-4bef-9fbf-52480d604b02')
self.assertRaises(exception.StackNotFound,
self.eng.describe_stack_resources,
self.ctx, non_exist_identifier, 'WebServer')
@stack_context('find_phys_res_stack')
def test_find_physical_resource(self):
resources = self.eng.describe_stack_resources(self.ctx,
self.stack.identifier(),
None)
phys_id = resources[0]['physical_resource_id']
result = self.eng.find_physical_resource(self.ctx, phys_id)
self.assertTrue(isinstance(result, dict))
resource_identity = identifier.ResourceIdentifier(**result)
self.assertEqual(self.stack.identifier(), resource_identity.stack())
self.assertEqual('WebServer', resource_identity.resource_name)
def test_find_physical_resource_nonexist(self):
self.assertRaises(exception.PhysicalResourceNotFound,
self.eng.find_physical_resource,
self.ctx, 'foo')
@stack_context('service_resources_list_test_stack')
def test_stack_resources_list(self):
self.m.StubOutWithMock(parser.Stack, 'load')
parser.Stack.load(self.ctx,
stack=mox.IgnoreArg()).AndReturn(self.stack)
self.m.ReplayAll()
resources = self.eng.list_stack_resources(self.ctx,
self.stack.identifier())
self.assertEqual(1, len(resources))
r = resources[0]
self.assertTrue('resource_identity' in r)
self.assertTrue('updated_time' in r)
self.assertTrue('physical_resource_id' in r)
self.assertTrue('resource_name' in r)
self.assertEqual('WebServer', r['resource_name'])
self.assertTrue('resource_status' in r)
self.assertTrue('resource_status_reason' in r)
self.assertTrue('resource_type' in r)
self.m.VerifyAll()
def test_stack_resources_list_nonexist_stack(self):
non_exist_identifier = identifier.HeatIdentifier(
self.ctx.tenant_id, 'wibble',
'18d06e2e-44d3-4bef-9fbf-52480d604b02')
stack_not_found_exc = exception.StackNotFound(stack_name='test')
self.m.StubOutWithMock(service.EngineService, '_get_stack')
service.EngineService._get_stack(
self.ctx, non_exist_identifier).AndRaise(stack_not_found_exc)
self.m.ReplayAll()
self.assertRaises(exception.StackNotFound,
self.eng.list_stack_resources,
self.ctx, non_exist_identifier)
self.m.VerifyAll()
def test_signal_reception(self):
stack = get_stack('signal_reception',
self.ctx,
policy_template)
self.stack = stack
setup_keystone_mocks(self.m, stack)
self.m.ReplayAll()
stack.store()
stack.create()
test_data = {'food': 'yum'}
self.m.StubOutWithMock(service.EngineService, '_get_stack')
s = db_api.stack_get(self.ctx, self.stack.id)
service.EngineService._get_stack(self.ctx,
self.stack.identifier()).AndReturn(s)
self.m.StubOutWithMock(service.EngineService, '_load_user_creds')
service.EngineService._load_user_creds(
mox.IgnoreArg()).AndReturn(self.ctx)
self.m.StubOutWithMock(rsrs.Resource, 'signal')
rsrs.Resource.signal(mox.IgnoreArg()).AndReturn(None)
self.m.ReplayAll()
result = self.eng.resource_signal(self.ctx,
dict(self.stack.identifier()),
'WebServerScaleDownPolicy',
test_data)
self.m.VerifyAll()
self.stack.delete()
def test_signal_reception_no_resource(self):
stack = get_stack('signal_reception_no_resource',
self.ctx,
policy_template)
setup_keystone_mocks(self.m, stack)
self.stack = stack
self.m.ReplayAll()
stack.store()
stack.create()
test_data = {'food': 'yum'}
self.m.StubOutWithMock(service.EngineService, '_get_stack')
s = db_api.stack_get(self.ctx, self.stack.id)
service.EngineService._get_stack(self.ctx,
self.stack.identifier()).AndReturn(s)
self.m.StubOutWithMock(service.EngineService, '_load_user_creds')
service.EngineService._load_user_creds(
mox.IgnoreArg()).AndReturn(self.ctx)
self.m.ReplayAll()
self.assertRaises(exception.ResourceNotFound,
self.eng.resource_signal, self.ctx,
dict(self.stack.identifier()),
'resource_does_not_exist',
test_data)
self.m.VerifyAll()
self.stack.delete()
@stack_context('service_metadata_test_stack')
def test_metadata(self):
test_metadata = {'foo': 'bar', 'baz': 'quux', 'blarg': 'wibble'}
pre_update_meta = self.stack['WebServer'].metadata
self.m.StubOutWithMock(service.EngineService, '_get_stack')
s = db_api.stack_get(self.ctx, self.stack.id)
service.EngineService._get_stack(self.ctx,
self.stack.identifier()).AndReturn(s)
self.m.StubOutWithMock(instances.Instance, 'metadata_update')
instances.Instance.metadata_update(new_metadata=test_metadata)
self.m.StubOutWithMock(service.EngineService, '_load_user_creds')
service.EngineService._load_user_creds(
mox.IgnoreArg()).AndReturn(self.ctx)
self.m.ReplayAll()
result = self.eng.metadata_update(self.ctx,
dict(self.stack.identifier()),
'WebServer', test_metadata)
# metadata_update is a no-op for all resources except
# WaitConditionHandle so we don't expect this to have changed
self.assertEqual(pre_update_meta, result)
self.m.VerifyAll()
def test_metadata_err_stack(self):
non_exist_identifier = identifier.HeatIdentifier(
self.ctx.tenant_id, 'wibble',
'18d06e2e-44d3-4bef-9fbf-52480d604b02')
stack_not_found_exc = exception.StackNotFound(stack_name='test')
self.m.StubOutWithMock(service.EngineService, '_get_stack')
service.EngineService._get_stack(
self.ctx, non_exist_identifier).AndRaise(stack_not_found_exc)
self.m.ReplayAll()
test_metadata = {'foo': 'bar', 'baz': 'quux', 'blarg': 'wibble'}
self.assertRaises(exception.StackNotFound,
self.eng.metadata_update,
self.ctx, non_exist_identifier,
'WebServer', test_metadata)
self.m.VerifyAll()
@stack_context('service_metadata_err_resource_test_stack', False)
def test_metadata_err_resource(self):
self.m.StubOutWithMock(parser.Stack, 'load')
parser.Stack.load(self.ctx,
stack=mox.IgnoreArg()).AndReturn(self.stack)
self.m.ReplayAll()
test_metadata = {'foo': 'bar', 'baz': 'quux', 'blarg': 'wibble'}
self.assertRaises(exception.ResourceNotFound,
self.eng.metadata_update,
self.ctx, dict(self.stack.identifier()),
'NooServer', test_metadata)
self.m.VerifyAll()
@stack_context('periodic_watch_task_not_created')
def test_periodic_watch_task_not_created(self):
self.eng.stg[self.stack.id] = DummyThreadGroup()
self.eng._start_watch_task(self.stack.id, self.ctx)
self.assertEqual([], self.eng.stg[self.stack.id].threads)
def test_periodic_watch_task_created(self):
stack = get_stack('period_watch_task_created',
utils.dummy_context(),
alarm_template)
self.stack = stack
self.m.ReplayAll()
stack.store()
stack.create()
self.eng.stg[stack.id] = DummyThreadGroup()
self.eng._start_watch_task(stack.id, self.ctx)
self.assertEqual([self.eng._periodic_watcher_task],
self.eng.stg[stack.id].threads)
self.stack.delete()
def test_periodic_watch_task_created_nested(self):
self.m.StubOutWithMock(urlfetch, 'get')
urlfetch.get('https://server.test/alarm.template').MultipleTimes().\
AndReturn(alarm_template)
self.m.ReplayAll()
stack = get_stack('period_watch_task_created_nested',
utils.dummy_context(),
nested_alarm_template)
setup_keystone_mocks(self.m, stack)
self.stack = stack
self.m.ReplayAll()
stack.store()
stack.create()
self.eng.stg[stack.id] = DummyThreadGroup()
self.eng._start_watch_task(stack.id, self.ctx)
self.assertEqual([self.eng._periodic_watcher_task],
self.eng.stg[stack.id].threads)
self.stack.delete()
@stack_context('service_show_watch_test_stack', False)
@utils.wr_delete_after
def test_show_watch(self):
# Insert two dummy watch rules into the DB
rule = {u'EvaluationPeriods': u'1',
u'AlarmActions': [u'WebServerRestartPolicy'],
u'AlarmDescription': u'Restart the WikiDatabase',
u'Namespace': u'system/linux',
u'Period': u'300',
u'ComparisonOperator': u'GreaterThanThreshold',
u'Statistic': u'SampleCount',
u'Threshold': u'2',
u'MetricName': u'ServiceFailure'}
self.wr = []
self.wr.append(watchrule.WatchRule(context=self.ctx,
watch_name='show_watch_1',
rule=rule,
watch_data=[],
stack_id=self.stack.id,
state='NORMAL'))
self.wr[0].store()
self.wr.append(watchrule.WatchRule(context=self.ctx,
watch_name='show_watch_2',
rule=rule,
watch_data=[],
stack_id=self.stack.id,
state='NORMAL'))
self.wr[1].store()
# watch_name=None should return all watches
result = self.eng.show_watch(self.ctx, watch_name=None)
result_names = [r.get('name') for r in result]
self.assertIn('show_watch_1', result_names)
self.assertIn('show_watch_2', result_names)
result = self.eng.show_watch(self.ctx, watch_name="show_watch_1")
self.assertEqual(1, len(result))
self.assertIn('name', result[0])
self.assertEqual('show_watch_1', result[0]['name'])
result = self.eng.show_watch(self.ctx, watch_name="show_watch_2")
self.assertEqual(1, len(result))
self.assertIn('name', result[0])
self.assertEqual('show_watch_2', result[0]['name'])
self.assertRaises(exception.WatchRuleNotFound,
self.eng.show_watch,
self.ctx, watch_name="nonexistent")
# Check the response has all keys defined in the engine API
for key in engine_api.WATCH_KEYS:
self.assertTrue(key in result[0])
@stack_context('service_show_watch_metric_test_stack', False)
@utils.wr_delete_after
def test_show_watch_metric(self):
# Insert dummy watch rule into the DB
rule = {u'EvaluationPeriods': u'1',
u'AlarmActions': [u'WebServerRestartPolicy'],
u'AlarmDescription': u'Restart the WikiDatabase',
u'Namespace': u'system/linux',
u'Period': u'300',
u'ComparisonOperator': u'GreaterThanThreshold',
u'Statistic': u'SampleCount',
u'Threshold': u'2',
u'MetricName': u'ServiceFailure'}
self.wr = watchrule.WatchRule(context=self.ctx,
watch_name='show_watch_metric_1',
rule=rule,
watch_data=[],
stack_id=self.stack.id,
state='NORMAL')
self.wr.store()
# And add a metric datapoint
watch = db_api.watch_rule_get_by_name(self.ctx, 'show_watch_metric_1')
self.assertNotEqual(watch, None)
values = {'watch_rule_id': watch.id,
'data': {u'Namespace': u'system/linux',
u'ServiceFailure': {
u'Units': u'Counter', u'Value': 1}}}
watch = db_api.watch_data_create(self.ctx, values)
# Check there is one result returned
result = self.eng.show_watch_metric(self.ctx,
metric_namespace=None,
metric_name=None)
self.assertEqual(1, len(result))
# Create another metric datapoint and check we get two
watch = db_api.watch_data_create(self.ctx, values)
result = self.eng.show_watch_metric(self.ctx,
metric_namespace=None,
metric_name=None)
self.assertEqual(2, len(result))
# Check the response has all keys defined in the engine API
for key in engine_api.WATCH_DATA_KEYS:
self.assertTrue(key in result[0])
@stack_context('service_show_watch_state_test_stack')
@utils.wr_delete_after
def test_set_watch_state(self):
# Insert dummy watch rule into the DB
rule = {u'EvaluationPeriods': u'1',
u'AlarmActions': [u'WebServerRestartPolicy'],
u'AlarmDescription': u'Restart the WikiDatabase',
u'Namespace': u'system/linux',
u'Period': u'300',
u'ComparisonOperator': u'GreaterThanThreshold',
u'Statistic': u'SampleCount',
u'Threshold': u'2',
u'MetricName': u'ServiceFailure'}
self.wr = watchrule.WatchRule(context=self.ctx,
watch_name='OverrideAlarm',
rule=rule,
watch_data=[],
stack_id=self.stack.id,
state='NORMAL')
self.wr.store()
class DummyAction(object):
signal = "dummyfoo"
dummy_action = DummyAction()
self.m.StubOutWithMock(parser.Stack, 'resource_by_refid')
parser.Stack.resource_by_refid(
'WebServerRestartPolicy').AndReturn(dummy_action)
# Replace the real stack threadgroup with a dummy one, so we can
# check the function returned on ALARM is correctly scheduled
self.eng.stg[self.stack.id] = DummyThreadGroup()
self.m.ReplayAll()
state = watchrule.WatchRule.NODATA
result = self.eng.set_watch_state(self.ctx,
watch_name="OverrideAlarm",
state=state)
self.assertEqual(state, result[engine_api.WATCH_STATE_VALUE])
self.assertEqual([], self.eng.stg[self.stack.id].threads)
state = watchrule.WatchRule.NORMAL
result = self.eng.set_watch_state(self.ctx,
watch_name="OverrideAlarm",
state=state)
self.assertEqual(state, result[engine_api.WATCH_STATE_VALUE])
self.assertEqual([], self.eng.stg[self.stack.id].threads)
state = watchrule.WatchRule.ALARM
result = self.eng.set_watch_state(self.ctx,
watch_name="OverrideAlarm",
state=state)
self.assertEqual(state, result[engine_api.WATCH_STATE_VALUE])
self.assertEqual([DummyAction.signal],
self.eng.stg[self.stack.id].threads)
self.m.VerifyAll()
@stack_context('service_show_watch_state_badstate_test_stack')
@utils.wr_delete_after
def test_set_watch_state_badstate(self):
# Insert dummy watch rule into the DB
rule = {u'EvaluationPeriods': u'1',
u'AlarmActions': [u'WebServerRestartPolicy'],
u'AlarmDescription': u'Restart the WikiDatabase',
u'Namespace': u'system/linux',
u'Period': u'300',
u'ComparisonOperator': u'GreaterThanThreshold',
u'Statistic': u'SampleCount',
u'Threshold': u'2',
u'MetricName': u'ServiceFailure'}
self.wr = watchrule.WatchRule(context=self.ctx,
watch_name='OverrideAlarm2',
rule=rule,
watch_data=[],
stack_id=self.stack.id,
state='NORMAL')
self.wr.store()
self.m.StubOutWithMock(watchrule.WatchRule, 'set_watch_state')
for state in ["HGJHGJHG", "1234", "!\*(&%"]:
watchrule.WatchRule.set_watch_state(state)\
.InAnyOrder().AndRaise(ValueError)
self.m.ReplayAll()
for state in ["HGJHGJHG", "1234", "!\*(&%"]:
self.assertRaises(ValueError,
self.eng.set_watch_state,
self.ctx, watch_name="OverrideAlarm2",
state=state)
self.m.VerifyAll()
def test_set_watch_state_noexist(self):
state = watchrule.WatchRule.ALARM # State valid
self.m.StubOutWithMock(watchrule.WatchRule, 'load')
watchrule.WatchRule.load(self.ctx, "nonexistent")\
.AndRaise(exception.WatchRuleNotFound(watch_name='test'))
self.m.ReplayAll()
self.assertRaises(exception.WatchRuleNotFound,
self.eng.set_watch_state,
self.ctx, watch_name="nonexistent", state=state)
self.m.VerifyAll()
def test_stack_list_all_empty(self):
sl = self.eng.list_stacks(self.ctx)
self.assertEqual(0, len(sl))
def test_stack_describe_all_empty(self):
sl = self.eng.show_stack(self.ctx, None)
self.assertEqual(0, len(sl))
def test_lazy_load_resources(self):
stack_name = 'lazy_load_test'
res._register_class('GenericResourceType',
generic_rsrc.GenericResource)
lazy_load_template = {
'Resources': {
'foo': {'Type': 'GenericResourceType'},
'bar': {
'Type': 'ResourceWithPropsType',
'Properties': {
'Foo': {'Ref': 'foo'},
}
}
}
}
templ = parser.Template(lazy_load_template)
stack = parser.Stack(self.ctx, stack_name, templ,
environment.Environment({}))
self.assertEqual(stack._resources, None)
self.assertEqual(stack._dependencies, None)
resources = stack.resources
self.assertEqual(type(resources), dict)
self.assertEqual(len(resources), 2)
self.assertEqual(type(resources.get('foo')),
generic_rsrc.GenericResource)
self.assertEqual(type(resources.get('bar')),
generic_rsrc.ResourceWithProps)
stack_dependencies = stack.dependencies
self.assertEqual(type(stack_dependencies), dependencies.Dependencies)
self.assertEqual(len(stack_dependencies.graph()), 2)
|
savi-dev/heat
|
heat/tests/test_engine_service.py
|
Python
|
apache-2.0
| 73,519
|
from . import BaseReporter
class LogReporter(BaseReporter):
""" Log based reporter.
"""
def __init__(self, output_log=None):
""" output will be logged to output_log
:param output_log: a python log object to output reports to.
"""
super(LogReporter, self).__init__()
self.logger = output_log
def output_values(self, counter_values):
        logs = sorted(counter_values.iteritems(), key=lambda kv: kv[0])
for k, v in logs:
if not (k.startswith("__") and k.endswith("__")): # don't output __node_reports__ etc.
self.logger.info("%s %s", k, v)
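# Illustrative usage sketch (added commentary, not part of the original
# module); the logger name is made up:
#
#     import logging
#     logging.basicConfig(level=logging.INFO)
#     reporter = LogReporter(output_log=logging.getLogger("counters"))
#     reporter.output_values({"requests": 42, "__node_reports__": {}})
#     # logs "requests 42"; the dunder-wrapped internal key is skipped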
|
prismskylabs/pycounters
|
src/pycounters/reporters/logreporter.py
|
Python
|
apache-2.0
| 657
|
import sys
import threading
import traceback
def get_app_info_string():
"""
Get a string representing global information about the application. This is used for debugging.
:rtype: str
"""
app_info_list = _get_formatted_thread_stack_traces()
return '\n'.join(app_info_list)
def _get_formatted_thread_stack_traces():
"""
Get the formatted stack trace string for each currently running thread.
:rtype: list[str]
"""
formatted_traces = []
threads_by_id = {thread.ident: thread for thread in threading.enumerate()}
# The sys_current_frames() method is intended to be used for debugging like this.
for thread_id, stack in sys._current_frames().items(): # pylint: disable=protected-access
thread = threads_by_id.get(thread_id)
if thread:
thread_type = 'daemon' if thread.isDaemon() else 'nondaemon'
thread_stack_trace = ''.join(traceback.format_stack(stack))
formatted_traces.append('Current trace for {} thread "{}":\n{}'
.format(thread_type, thread.name, thread_stack_trace))
return formatted_traces
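# Illustrative usage (added note, not in the original module): dump the
# current stack trace of every thread, e.g. from a debug endpoint or a
# signal handler:
#
#     print(get_app_info_string())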
|
nickzuber/ClusterRunner
|
app/util/app_info.py
|
Python
|
apache-2.0
| 1,152
|
"""
SkCode internal tag test code.
"""
import unittest
from skcode.etree import RootTreeNode
from skcode.tags import (
TextTreeNode,
NewlineTreeNode,
HardNewlineTreeNode
)
class TextTagTestCase(unittest.TestCase):
""" Tests suite for the text tag module. """
def test_tag_constant_values(self):
""" Test tag constants. """
self.assertFalse(TextTreeNode.newline_closes)
self.assertFalse(TextTreeNode.same_tag_closes)
self.assertFalse(TextTreeNode.weak_parent_close)
self.assertFalse(TextTreeNode.standalone)
self.assertTrue(TextTreeNode.parse_embedded)
self.assertTrue(TextTreeNode.inline)
self.assertFalse(TextTreeNode.close_inlines)
self.assertIsNone(TextTreeNode.canonical_tag_name)
self.assertEqual((), TextTreeNode.alias_tag_names)
self.assertFalse(TextTreeNode.make_paragraphs_here)
def test_render_html(self):
""" Test the ``render_html`` method. """
root_tree_node = RootTreeNode()
tree_node = root_tree_node.new_child('test', TextTreeNode, content='test')
output_result = tree_node.render_html('')
self.assertEqual('test', output_result)
    def test_render_html_with_html_entities(self):
        """ Test the ``render_html`` method with HTML entities. """
        root_tree_node = RootTreeNode()
        tree_node = root_tree_node.new_child('test', TextTreeNode, content='<test>')
        output_result = tree_node.render_html('')
        self.assertEqual('&lt;test&gt;', output_result)
    def test_render_html_with_encoded_html_entities(self):
        """ Test the ``render_html`` method with encoded HTML entities. """
        root_tree_node = RootTreeNode()
        tree_node = root_tree_node.new_child('test', TextTreeNode, content='&lt;test&gt;')
        output_result = tree_node.render_html('')
        self.assertEqual('&lt;test&gt;', output_result)
def test_render_text(self):
""" Test the ``render_text`` method. """
root_tree_node = RootTreeNode()
tree_node = root_tree_node.new_child('test', TextTreeNode, content='test')
output_result = tree_node.render_text('')
self.assertEqual('test', output_result)
def test_render_text_with_html_entities(self):
""" Test the ``render_text`` method with HTML entities. """
root_tree_node = RootTreeNode()
tree_node = root_tree_node.new_child('test', TextTreeNode, content='<test>')
output_result = tree_node.render_text('')
self.assertEqual('<test>', output_result)
    def test_render_text_with_encoded_html_entities(self):
        """ Test the ``render_text`` method with encoded HTML entities. """
        root_tree_node = RootTreeNode()
        tree_node = root_tree_node.new_child('test', TextTreeNode, content='&lt;test&gt;')
        output_result = tree_node.render_text('')
        self.assertEqual('<test>', output_result)
class NewlineTagTestCase(unittest.TestCase):
""" Tests suite for the newline tag module. """
def test_tag_constant_values(self):
""" Test tag constants. """
self.assertFalse(NewlineTreeNode.newline_closes)
self.assertFalse(NewlineTreeNode.same_tag_closes)
        self.assertFalse(NewlineTreeNode.weak_parent_close)
self.assertFalse(NewlineTreeNode.standalone)
self.assertTrue(NewlineTreeNode.parse_embedded)
self.assertTrue(NewlineTreeNode.inline)
self.assertFalse(NewlineTreeNode.close_inlines)
self.assertIsNone(NewlineTreeNode.canonical_tag_name)
self.assertEqual((), NewlineTreeNode.alias_tag_names)
self.assertFalse(NewlineTreeNode.make_paragraphs_here)
def test_render_html(self):
""" Test the ``render_html`` method. """
root_tree_node = RootTreeNode()
tree_node = root_tree_node.new_child('test', NewlineTreeNode)
self.assertEqual('\n', tree_node.render_html(''))
def test_render_text(self):
""" Test the ``render_text`` method. """
root_tree_node = RootTreeNode()
tree_node = root_tree_node.new_child('test', NewlineTreeNode)
self.assertEqual(' ', tree_node.render_text(''))
class HardNewlineTagTestCase(unittest.TestCase):
""" Tests suite for the (hard) newline tag module. """
def test_tag_constant_values(self):
""" Test tag constants. """
self.assertFalse(HardNewlineTreeNode.newline_closes)
self.assertFalse(HardNewlineTreeNode.same_tag_closes)
        self.assertFalse(HardNewlineTreeNode.weak_parent_close)
self.assertFalse(HardNewlineTreeNode.standalone)
self.assertTrue(HardNewlineTreeNode.parse_embedded)
self.assertTrue(HardNewlineTreeNode.inline)
self.assertFalse(HardNewlineTreeNode.close_inlines)
self.assertIsNone(HardNewlineTreeNode.canonical_tag_name)
self.assertEqual((), HardNewlineTreeNode.alias_tag_names)
self.assertFalse(HardNewlineTreeNode.make_paragraphs_here)
def test_render_html(self):
""" Test the ``render_html`` method. """
root_tree_node = RootTreeNode()
tree_node = root_tree_node.new_child('test', HardNewlineTreeNode)
self.assertEqual('<br>\n', tree_node.render_html(''))
def test_render_text(self):
""" Test the ``render_text`` method. """
root_tree_node = RootTreeNode()
tree_node = root_tree_node.new_child('test', HardNewlineTreeNode)
self.assertEqual('\n', tree_node.render_text(''))
|
TamiaLab/PySkCode
|
tests/tests_tags/tests_internal.py
|
Python
|
agpl-3.0
| 5,497
|
# -*- coding: utf-8 -*-
"""
***************************************************************************
laszip.py
---------------------
Date : September 2013
Copyright : (C) 2013 by Martin Isenburg
Email : martin near rapidlasso point com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Martin Isenburg'
__date__ = 'September 2013'
__copyright__ = '(C) 2013, Martin Isenburg'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import os
from .LAStoolsUtils import LAStoolsUtils
from .LAStoolsAlgorithm import LAStoolsAlgorithm
from processing.core.parameters import ParameterBoolean
class laszip(LAStoolsAlgorithm):
REPORT_SIZE = "REPORT_SIZE"
CREATE_LAX = "CREATE_LAX"
APPEND_LAX = "APPEND_LAX"
def defineCharacteristics(self):
self.name, self.i18n_name = self.trAlgorithm('laszip')
self.group, self.i18n_group = self.trAlgorithm('LAStools')
self.addParametersVerboseGUI()
self.addParametersPointInputGUI()
self.addParameter(ParameterBoolean(laszip.REPORT_SIZE,
self.tr("only report size"), False))
self.addParameter(ParameterBoolean(laszip.CREATE_LAX,
self.tr("create spatial indexing file (*.lax)"), False))
self.addParameter(ParameterBoolean(laszip.APPEND_LAX,
self.tr("append *.lax into *.laz file"), False))
self.addParametersPointOutputGUI()
self.addParametersAdditionalGUI()
def processAlgorithm(self, progress):
        if LAStoolsUtils.hasWine():
commands = [os.path.join(LAStoolsUtils.LAStoolsPath(), "bin", "laszip.exe")]
else:
commands = [os.path.join(LAStoolsUtils.LAStoolsPath(), "bin", "laszip")]
self.addParametersVerboseCommands(commands)
self.addParametersPointInputCommands(commands)
if self.getParameterValue(laszip.REPORT_SIZE):
commands.append("-size")
if self.getParameterValue(laszip.CREATE_LAX):
commands.append("-lax")
if self.getParameterValue(laszip.APPEND_LAX):
commands.append("-append")
self.addParametersPointOutputCommands(commands)
self.addParametersAdditionalCommands(commands)
LAStoolsUtils.runLAStools(commands, progress)
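# Illustrative command line this algorithm assembles (added note, not in the
# original file; the input/output flags come from the shared LAStools helper
# methods and are assumed here):
#
#     laszip -i input.las -size -lax -o output.laz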
|
alexbruy/QGIS
|
python/plugins/processing/algs/lidar/lastools/laszip.py
|
Python
|
gpl-2.0
| 3,010
|
from kvmap.code.projections import *
from urllib2 import urlopen
from httplib import HTTPConnection
from threading import Thread
from kivy.logger import Logger
from kivy.loader import Loader
from os.path import join, dirname
import time, os
import hashlib
try:
from pyproj import Proj
from xml.etree import ElementTree as ET
except:
pass
class WMSOverlayServer(object):
cache = {}
available_maptype = dict(roadmap='Roadmap') # default
type = "wms"
'''Generic WMS server'''
def __init__(self, progress_callback=None):
self.progress_callback = progress_callback
def setProgressCallback(self, progress_callback):
self.progress_callback = progress_callback
def getInfo(self, lat, lon, epsilon):
return None
def get(self, parent, width, height):
self.bl = parent.bottom_left
self.tr = parent.top_right
self.zoom = parent.zoom
url = self.geturl(self.bl[0], self.bl[1], self.tr[0], self.tr[1], self.zoom, width, height)
if not url:
return None
key = hashlib.md5(url).hexdigest()
if key in self.cache:
return self.cache[key]
try:
image = Loader.image('http://' + self.provider_host + url, progress_callback=self.progress_callback)
self.cache[key] = image
except Exception, e:
Logger.error('OverlayServer could not find (or read) image %s [%s]' % (url, e))
            image = None
        return image  # hand back the freshly loaded image (or None on failure); the original never returned it
def getLegendGraphic(self):
if self.legend is None and not self.triedlegend:
self.triedlegend = True
layer = self.layer
if "," in layer:
layer = layer[layer.rindex(",") + 1:]
if self.legendlayer:
layer = self.legendlayer
url = self.baseurl + "?REQUEST=GetLegendGraphic&VERSION=1.0.0&FORMAT=image/png&LAYER=%s&ext=.png" % (layer)
try:
print 'http://' + self.provider_host + url
image = Loader.image('http://' + self.provider_host + url)
self.legend = image
except Exception, e:
Logger.error('OverlayServer could not find LEGENDGRAPHICS for %s %s' % (self.baseurl, layer))
return self.legend
def xy_to_co(self, lat, lon):
if self.customBounds:
x, y = latlon_to_custom(lat, lon, self.bounds)
elif self.isPLatLon: # patch for android - does not require pyproj library
x, y = lon, lat
elif self.isPGoogle: # patch for android - does not require pyproj library
x, y = latlon_to_google (lat, lon)
else:
x, y = transform(pLatlon, self.projection, lon, lat)
return x, y
def co_to_ll(self, x, y):
        if self.customBounds:
            # the original referenced undefined lat/lon here; by symmetry with
            # xy_to_co() the given x, y custom coordinates are converted back
            u, v = custom_to_unit(x, y, self.bounds)
            l, m = unit_to_latlon(u, v)
elif self.isPLatLon: # patch for android - does not require pyproj library
l, m = y, x
elif self.isPGoogle: # patch for android - does not require pyproj library
l, m = google_to_latlon (y, x)
else:
l, m = transform(self.projection, pLatlon, y, x)
return l, m
def geturl(self, lat1, lon1, lat2, lon2, zoom, w, h):
try:
x1, y1 = self.xy_to_co(lat1, lon1)
x2, y2 = self.xy_to_co(lat2, lon2)
return self.url + "&BBOX=%f,%f,%f,%f&WIDTH=%i&HEIGHT=%i&ext=.png" % (x1, y1, x2, y2, w, h)
except RuntimeError, e:
return None
def parseLayer(self, layer, data):
try:
name = layer.find("Name").text
except:
name = None
srss = layer.findall("SRS")
if name: # and srss:
data[name] = map(lambda x:x.text, srss)
if self.debug:
print "Provider %s provides layer %s in projections %s" % (self.provider_host, name, data[name])
subs = layer.findall("Layer")
for sub in subs:
self.parseLayer(sub, data)
def initFromGetCapabilities(self, host, baseurl, layer=None, index=0, srs=None):
        self.debug = (layer is None) and (index == 0)
# GetCapabilities (Layers + SRS)
if layer is None or srs is None:
capabilities = urlopen(host + baseurl + "?SERVICE=WMS&VERSION=1.1.1&Request=GetCapabilities").read().strip()
try:
tree = ET.fromstring(capabilities)
if self.debug:
ET.dump(tree)
layers = tree.findall("Capability/Layer") # TODO: proper parsing of cascading layers and their SRS
data = {}
for l in layers:
self.parseLayer(l, data)
# Choose Layer and SRS by (alphabetical) index
if layer is None:
layer = sorted(data.keys())[index]
if srs is None:
srs = sorted(data[layer])[0]
except:
pass
print "Displaying from %s/%s: layer %s in SRS %s." % (host, baseurl, layer, srs)
# generate tile URL and init projection by EPSG code
self.layer = layer
self.baseurl = baseurl
self.url = baseurl + "?LAYERS=%s&SRS=%s&FORMAT=image/png&TRANSPARENT=TRUE&SERVICE=WMS&VERSION=1.1.1&REQUEST=GetMap&STYLES=" % (layer, srs)
self.isPGoogle = False
self.isPLatLon = False
self.legend = None
self.legendlayer = None
self.triedlegend = False
if srs == "EPSG:4326":
self.isPLatLon = True
elif srs == "EPSG:900913" or srs == "EPSG:3857":
self.isPGoogle = True
try:
self.projection = pGoogle
except:
pass
else:
try:
self.projection = Proj(init=srs)
except:
pass
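# Illustrative sketch (not part of the original file): pointing the overlay
# at a hypothetical WMS endpoint; host and path are made up, and
# self.provider_host is assumed to be set elsewhere, as the rest of the
# class already assumes.
#
#     server = WMSOverlayServer()
#     server.initFromGetCapabilities('wms.example.org', '/geoserver/wms')
#     legend = server.getLegendGraphic()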
|
jchome/LocalGuide-Mobile
|
kvmap/overlays/WMSOverlayServer.py
|
Python
|
gpl-2.0
| 5,614
|
from sys import argv
from grslra.scaling import *
import numpy as np
from grslra.tools import subspace_angle
from grslra.testdata import testdata_rpca_lmafit
from grslra.grpca import grpca
import time
# This experiment computes the phase transitions for incomplete observations (20% to 80% of missing entries)
# define scenarios
m = 200
n = 200
maxval = 0.6
step = 0.01
# estimate which scenarios cannot be recovered; these will be skipped
impossible_thresh = 1.2
# Use 38th percentile as reference as there may be up to 60% large outliers
scaling = Scaling(percentile=38, val_at_percentile=0.17)
# high accuracy settings
p = 0.1
mu_end = 1E-8
params_grpca = {"PRINT": None, "VERBOSE": 0, "SMMPROD": False}
params_cg_Y = {}
params_cg_U = {}
values = np.arange(step, maxval + step, step)
nofvalues = values.size
nofscenarios = nofvalues ** 2 - 0.5 * ((2 - impossible_thresh) * nofvalues) ** 2
# initialize result array
result = np.zeros((nofvalues, nofvalues, 3))
result[:, :, 0] = 1.0
result[:, :, 1] = 90.0
result[:, :, 2] = 0
rate_Omega=float(argv[1])
card_Omega = np.int(np.round(rate_Omega * m * n))
# Estimate runtime of the evaluation
rho_min = step
k_min = np.int(np.round(step*np.minimum(m, n)))
t_start = time.time()
X, _, _, _,_ = testdata_rpca_lmafit(m, n, k_min, rho_min)
Omega = np.unravel_index(np.random.choice(m * n, card_Omega, replace=False), (m, n))
X = X[Omega]
grpca(X, k_min, p, mu_end, params=params_grpca, scaling=scaling, Omega=Omega, dimensions=(m,n))
t_min = time.time() - t_start
rho_max = maxval
k_max = np.int(np.round(maxval*np.minimum(m, n)))
t_start = time.time()
X, _, _, _,_ = testdata_rpca_lmafit(m, n, k_max, rho_max)
Omega = np.unravel_index(np.random.choice(m * n, card_Omega, replace=False), (m, n))
X = X[Omega]
# use k_max for the worst-case timing run (the original passed k_min here)
grpca(X, k_max, p, mu_end, params=params_grpca, scaling=scaling, Omega=Omega, dimensions=(m,n))
t_max = time.time() - t_start
t_av = 0.5 * (t_max + t_min)
print "Estimated runtime: ", np.int(np.round(t_av * nofscenarios)), " seconds"
t_start = time.time()
for it_k_m in xrange(nofvalues):
k_m = values[it_k_m]
for it_rho in xrange(nofvalues):
rho = values[it_rho]
print "relative rank: ", k_m, "rho: ", rho
if it_k_m + it_rho > impossible_thresh * nofvalues:
result[it_k_m, it_rho, 0] = 1
result[it_k_m, it_rho, 1] = 90
            result[it_k_m, it_rho, 2] = 0
continue
k = np.int(np.round(k_m * m))
X_0, L_0, S_0, U_0, Y_0 = testdata_rpca_lmafit(m, n, k, rho)
Omega = np.unravel_index(np.random.choice(m * n, card_Omega, replace=False), (m, n))
X = X_0[Omega]
t_startit = time.time()
try:
U, Y = grpca(X, k, p, mu_end, params=params_grpca, params_cg_U=params_cg_U, params_cg_Y=params_cg_Y, scaling=scaling, Omega=Omega, dimensions=(m, n))
t = time.time() - t_startit
L_p = np.dot(U,Y)
err = np.linalg.norm(L_p - L_0, ord='fro') / np.linalg.norm(L_0, ord='fro')
angle = subspace_angle(U, U_0)
print "Error: ", err
result[it_k_m, it_rho, 0] = err
result[it_k_m, it_rho, 1] = angle
result[it_k_m, it_rho, 2] = t
except:
pass
np.savez('result_' + '{:d}'.format(np.int(100*rate_Omega)), result=result, maxval=maxval, step=step)
print "Finished in ", time.time()-t_start, " seconds"
|
clemenshage/grslra
|
experiments/4_grpca/completion/completion.py
|
Python
|
mit
| 3,409
|
##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
import llnl.util.tty as tty
class Swiftsim(AutotoolsPackage):
"""SPH With Inter-dependent Fine-grained Tasking (SWIFT) provides
astrophysicists with a state of the art framework to perform
particle based simulations.
"""
homepage = 'http://icc.dur.ac.uk/swift/'
url = 'http://gitlab.cosma.dur.ac.uk/swift/swiftsim/repository/archive.tar.gz?ref=v0.3.0'
version('0.3.0', git='https://gitlab.cosma.dur.ac.uk/swift/swiftsim.git',
commit='254cc1b563b2f88ddcf437b1f71da123bb9db733')
variant('mpi', default=True,
description='Enable distributed memory parallelism')
# Build dependencies
depends_on('autoconf', type='build')
depends_on('automake', type='build')
depends_on('libtool', type='build')
depends_on('m4', type='build')
# link-time / run-time dependencies
depends_on('mpi', when='+mpi')
depends_on('metis')
depends_on('hdf5~mpi', when='~mpi')
depends_on('hdf5+mpi', when='+mpi')
def setup_environment(self, spack_env, run_env):
# Needed to be able to download from the Durham gitlab repository
tty.warn('Setting "GIT_SSL_NO_VERIFY=1"')
tty.warn('This is needed to clone SWIFT repository')
spack_env.set('GIT_SSL_NO_VERIFY', 1)
def configure_args(self):
return ['--prefix=%s' % self.prefix,
'--enable-mpi' if '+mpi' in self.spec else '--disable-mpi',
'--with-metis={0}'.format(self.spec['metis'].prefix),
'--enable-optimization']
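# Illustrative usage (added note, not in the original package file):
#
#     $ spack install swiftsim        # default: +mpi, builds against hdf5+mpi
#     $ spack install swiftsim~mpi    # serial build against hdf5~mpi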
|
skosukhin/spack
|
var/spack/repos/builtin/packages/swiftsim/package.py
|
Python
|
lgpl-2.1
| 2,785
|
# Postr, a Flickr Uploader
#
# Copyright (C) 2006-2008 Ross Burton <ross@burtonini.com>
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; either version 2, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51 Franklin
# St, Fifth Floor, Boston, MA 02110-1301 USA
import os
from gi.repository import Gtk, GConf
def on_url_clicked(button, url):
"""Global LinkButton handler that starts the default GNOME HTTP handler, or
firefox."""
# Get the HTTP URL handler
client = GConf.Client.get_default()
browser = client.get_string("/desktop/gnome/url-handlers/http/command") or "firefox"
# Because the world sucks and everyone hates me, just use the first word and
# hope that is enough. The problem is that some people have [epiphany %s]
# which means the & needs escaping or quoting, others have [iceweasel
# -remote "openurl(%s,newtab)"] which means the & must not be escaped or
# quoted. I can't see a general solution
browser = browser.split(" ")[0]
os.spawnlp(os.P_NOWAIT, browser, browser, url)
# TODO: if that didn't work fallback on x-www-browser or something
class AuthenticationDialog(Gtk.Dialog):
def __init__(self, parent, url):
Gtk.Dialog.__init__(self,
title=_("Flickr Uploader"), parent=parent,
buttons=(Gtk.STOCK_CANCEL, Gtk.ResponseType.REJECT,
_("Continue"), Gtk.ResponseType.ACCEPT))
vbox = Gtk.VBox(spacing=8)
vbox.set_border_width(8)
label = Gtk.Label(_("Postr needs to login to Flickr to upload your photos. "
"Please click on the link below to login to Flickr."))
label.set_line_wrap(True)
vbox.add(label)
# Gtk.LinkButton is only in 2.10, so use a normal button if it isn't
# available.
if hasattr(Gtk, "LinkButton"):
Gtk.link_button_set_uri_hook(on_url_clicked)
button = Gtk.LinkButton(url, _("Login to Flickr"))
else:
button = Gtk.Button(_("Login to Flickr"))
button.connect("clicked", on_url_clicked, url)
vbox.add(button)
self.vbox.add(vbox)
self.show_all()
|
GNOME/postr
|
src/AuthenticationDialog.py
|
Python
|
gpl-2.0
| 2,724
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for list_session_groups."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from google.protobuf import text_format
import tensorflow as tf
from tensorboard.backend.event_processing import plugin_event_multiplexer
from tensorboard.backend.event_processing import event_accumulator
from tensorboard.plugins import base_plugin
from tensorboard.plugins.hparams import api_pb2
from tensorboard.plugins.hparams import backend_context
from tensorboard.plugins.hparams import list_session_groups
from tensorboard.plugins.hparams import metadata
from tensorboard.plugins.hparams import plugin_data_pb2
DATA_TYPE_EXPERIMENT = 'experiment'
DATA_TYPE_SESSION_START_INFO = 'session_start_info'
DATA_TYPE_SESSION_END_INFO = 'session_end_info'
TensorEvent = event_accumulator.TensorEvent
class ListSessionGroupsTest(tf.test.TestCase):
def __init__(self, methodName='runTest'):
super(ListSessionGroupsTest, self).__init__(methodName)
# Make assertProtoEquals print all the diff.
self.maxDiff = None
def setUp(self):
self._mock_tb_context = tf.test.mock.create_autospec(
base_plugin.TBContext)
self._mock_multiplexer = tf.test.mock.create_autospec(
plugin_event_multiplexer.EventMultiplexer)
self._mock_tb_context.multiplexer = self._mock_multiplexer
self._mock_multiplexer.PluginRunToTagToContent.return_value = {
'' : {
metadata.EXPERIMENT_TAG :
self._serialized_plugin_data(
DATA_TYPE_EXPERIMENT, '''
description: 'Test experiment'
user: 'Test user'
hparam_infos: [
{
name: 'initial_temp'
type: DATA_TYPE_FLOAT64
},
{
name: 'final_temp'
type: DATA_TYPE_FLOAT64
},
{ name: 'string_hparam' },
{ name: 'bool_hparam' },
{ name: 'optional_string_hparam' }
]
metric_infos: [
{ name: { tag: 'current_temp' } },
{ name: { tag: 'delta_temp' } },
{ name: { tag: 'optional_metric' } }
]
''')
},
'session_1' : {
metadata.SESSION_START_INFO_TAG :
self._serialized_plugin_data(
DATA_TYPE_SESSION_START_INFO, '''
hparams:{ key: 'initial_temp' value: { number_value: 270 } },
hparams:{ key: 'final_temp' value: { number_value: 150 } },
hparams:{
key: 'string_hparam' value: { string_value: 'a string' }
},
hparams:{ key: 'bool_hparam' value: { bool_value: true } }
group_name: 'group_1'
start_time_secs: 314159
'''),
metadata.SESSION_END_INFO_TAG :
self._serialized_plugin_data(
DATA_TYPE_SESSION_END_INFO, '''
status: STATUS_SUCCESS
end_time_secs: 314164
''')
},
'session_2' : {
metadata.SESSION_START_INFO_TAG :
self._serialized_plugin_data(
DATA_TYPE_SESSION_START_INFO, '''
hparams:{ key: 'initial_temp' value: { number_value: 280 } },
hparams:{ key: 'final_temp' value: { number_value: 100 } },
hparams:{
key: 'string_hparam' value: { string_value: 'AAAAA' }
},
hparams:{ key: 'bool_hparam' value: { bool_value: false } }
group_name: 'group_2'
start_time_secs: 314159
'''),
metadata.SESSION_END_INFO_TAG :
self._serialized_plugin_data(
DATA_TYPE_SESSION_END_INFO, '''
status: STATUS_SUCCESS
end_time_secs: 314164
''')
},
'session_3' : {
metadata.SESSION_START_INFO_TAG :
self._serialized_plugin_data(
DATA_TYPE_SESSION_START_INFO, '''
hparams:{ key: 'initial_temp' value: { number_value: 280 } },
hparams:{ key: 'final_temp' value: { number_value: 100 } },
hparams:{
key: 'string_hparam' value: { string_value: 'AAAAA' }
},
hparams:{ key: 'bool_hparam' value: { bool_value: false } }
group_name: 'group_2'
start_time_secs: 314159
'''),
metadata.SESSION_END_INFO_TAG :
self._serialized_plugin_data(
DATA_TYPE_SESSION_END_INFO, '''
status: STATUS_SUCCESS
end_time_secs: 314164
''')
},
'session_4' : {
metadata.SESSION_START_INFO_TAG :
self._serialized_plugin_data(
DATA_TYPE_SESSION_START_INFO, '''
hparams:{ key: 'initial_temp' value: { number_value: 300 } },
hparams:{ key: 'final_temp' value: { number_value: 120 } },
hparams:{
key: 'string_hparam' value: { string_value: 'a string_3' }
},
hparams:{ key: 'bool_hparam' value: { bool_value: true } }
hparams:{
key: 'optional_string_hparam' value { string_value: 'BB' }
},
group_name: 'group_3'
start_time_secs: 314159
'''),
metadata.SESSION_END_INFO_TAG :
self._serialized_plugin_data(
DATA_TYPE_SESSION_END_INFO, '''
status: STATUS_SUCCESS
end_time_secs: 314164
''')
},
}
self._mock_multiplexer.Tensors.side_effect = self._mock_tensors
# A mock version of EventMultiplexer.Tensors
def _mock_tensors(self, run, tag):
result_dict = {
'session_1': {
'current_temp': [
TensorEvent(
wall_time=1, step=1,
tensor_proto=tf.make_tensor_proto(10.0))
],
'delta_temp': [
TensorEvent(
wall_time=1, step=1,
tensor_proto=tf.make_tensor_proto(20.0)),
TensorEvent(
wall_time=10, step=2,
tensor_proto=tf.make_tensor_proto(15.0))
],
'optional_metric' : [
TensorEvent(
wall_time=1, step=1,
tensor_proto=tf.make_tensor_proto(20.0)),
TensorEvent(
wall_time=2, step=20,
tensor_proto=tf.make_tensor_proto(33.0))
]
},
'session_2': {
'current_temp': [
TensorEvent(
wall_time=1, step=1,
tensor_proto=tf.make_tensor_proto(100.0)),
],
'delta_temp': [
TensorEvent(
wall_time=1, step=1,
tensor_proto=tf.make_tensor_proto(200.0)),
TensorEvent(
wall_time=10, step=2,
tensor_proto=tf.make_tensor_proto(150.0))
]
},
'session_3': {
'current_temp': [
TensorEvent(
wall_time=1, step=1,
tensor_proto=tf.make_tensor_proto(1.0)),
],
'delta_temp': [
TensorEvent(
wall_time=1, step=1,
tensor_proto=tf.make_tensor_proto(2.0)),
TensorEvent(
wall_time=10, step=2,
tensor_proto=tf.make_tensor_proto(1.5))
]
},
'session_4': {
'current_temp': [
TensorEvent(
wall_time=1, step=1,
tensor_proto=tf.make_tensor_proto(101.0)),
],
'delta_temp': [
TensorEvent(
wall_time=1, step=1,
tensor_proto=tf.make_tensor_proto(201.0)),
TensorEvent(
wall_time=10, step=2,
tensor_proto=tf.make_tensor_proto(-151.0))
]
},
}
return result_dict[run][tag]
def test_empty_request(self):
self._verify_full_response(
request='',
expected_response='''
total_size: 3
''')
def test_no_filter_no_sort(self):
self._verify_full_response(
request='''
start_index: 0
slice_size: 3
''',
expected_response='''
session_groups {
name: "group_1"
hparams { key: "bool_hparam" value { bool_value: true } }
hparams { key: "final_temp" value { number_value: 150.0 } }
hparams { key: "initial_temp" value { number_value: 270.0 } }
hparams { key: "string_hparam" value { string_value: "a string" } }
metric_values {
name { tag: "current_temp" }
value: 10
training_step: 1
wall_time_secs: 1.0
}
metric_values { name { tag: "delta_temp" } value: 15
training_step: 2
wall_time_secs: 10.0
}
metric_values { name { tag: "optional_metric" } value: 33
training_step: 20
wall_time_secs: 2.0
}
sessions {
name: "session_1"
start_time_secs: 314159
end_time_secs: 314164
status: STATUS_SUCCESS
metric_values {
name { tag: "current_temp" }
value: 10
training_step: 1
wall_time_secs: 1.0
}
metric_values { name { tag: "delta_temp" } value: 15
training_step: 2
wall_time_secs: 10.0
}
metric_values { name { tag: "optional_metric" } value: 33
training_step: 20
wall_time_secs: 2.0
}
}
}
session_groups {
name: "group_2"
hparams { key: "bool_hparam" value { bool_value: false } }
hparams { key: "final_temp" value { number_value: 100.0 } }
hparams { key: "initial_temp" value { number_value: 280.0 } }
hparams { key: "string_hparam" value { string_value: "AAAAA"}}
metric_values {
name { tag: "current_temp" }
value: 100
training_step: 1
wall_time_secs: 1.0
}
metric_values { name { tag: "delta_temp" } value: 150
training_step: 2
wall_time_secs: 10.0
}
sessions {
name: "session_2"
start_time_secs: 314159
end_time_secs: 314164
status: STATUS_SUCCESS
metric_values {
name { tag: "current_temp" }
value: 100
training_step: 1
wall_time_secs: 1.0
}
metric_values { name { tag: "delta_temp" } value: 150
training_step: 2
wall_time_secs: 10.0
}
}
sessions {
name: "session_3"
start_time_secs: 314159
end_time_secs: 314164
status: STATUS_SUCCESS
metric_values {
name { tag: "current_temp" }
value: 1.0
training_step: 1
wall_time_secs: 1.0
}
metric_values { name { tag: "delta_temp" } value: 1.5
training_step: 2
wall_time_secs: 10.0
}
}
}
session_groups {
name: "group_3"
hparams { key: "bool_hparam" value { bool_value: true } }
hparams { key: "final_temp" value { number_value: 120.0 } }
hparams { key: "initial_temp" value { number_value: 300.0 } }
hparams { key: "string_hparam" value { string_value: "a string_3"}}
hparams {
key: 'optional_string_hparam' value { string_value: 'BB' }
}
metric_values {
name { tag: "current_temp" }
value: 101.0
training_step: 1
wall_time_secs: 1.0
}
metric_values { name { tag: "delta_temp" } value: -151.0
training_step: 2
wall_time_secs: 10.0
}
sessions {
name: "session_4"
start_time_secs: 314159
end_time_secs: 314164
status: STATUS_SUCCESS
metric_values {
name { tag: "current_temp" }
value: 101.0
training_step: 1
wall_time_secs: 1.0
}
metric_values { name { tag: "delta_temp" } value: -151.0
training_step: 2
wall_time_secs: 10.0
}
}
}
total_size: 3
''')
def test_no_filter_no_sort_partial_slice(self):
self._verify_handler(
request='''
start_index: 1
slice_size: 1
''',
expected_session_group_names=["group_2"],
expected_total_size=3)
def test_filter_regexp(self):
self._verify_handler(
request='''
col_params: {
hparam: 'string_hparam'
filter_regexp: 'AA*'
}
start_index: 0
slice_size: 3
''',
expected_session_group_names=["group_2"],
expected_total_size=1)
# Test filtering out all session groups.
self._verify_handler(
request='''
col_params: {
hparam: 'string_hparam'
filter_regexp: 'a string_100'
}
start_index: 0
slice_size: 3
''',
expected_session_group_names=[],
expected_total_size=0)
def test_filter_interval(self):
self._verify_handler(
request='''
col_params: {
hparam: 'initial_temp'
filter_interval: { min_value: 270 max_value: 282 }
}
start_index: 0
slice_size: 3
''',
expected_session_group_names=["group_1", "group_2"],
expected_total_size=2)
def test_filter_discrete_set(self):
self._verify_handler(
request='''
col_params: {
metric: { tag: 'current_temp' }
filter_discrete: { values: [{ number_value: 101.0 },
{ number_value: 10.0 }] }
}
start_index: 0
slice_size: 3
''',
expected_session_group_names=["group_1", "group_3"],
expected_total_size=2)
def test_filter_multiple_columns(self):
self._verify_handler(
request='''
col_params: {
metric: { tag: 'current_temp' }
filter_discrete: { values: [{ number_value: 101.0 },
{ number_value: 10.0 }] }
}
col_params: {
hparam: 'initial_temp'
filter_interval: { min_value: 270 max_value: 282 }
}
start_index: 0
slice_size: 3
''',
expected_session_group_names=["group_1"],
expected_total_size=1)
def test_filter_single_column_with_missing_values(self):
self._verify_handler(
request='''
col_params: {
hparam: 'optional_string_hparam'
filter_regexp: 'B*'
exclude_missing_values: true
}
start_index: 0
slice_size: 3
''',
expected_session_group_names=["group_3"],
expected_total_size=1)
self._verify_handler(
request='''
col_params: {
hparam: 'optional_string_hparam'
filter_regexp: 'B*'
exclude_missing_values: false
}
start_index: 0
slice_size: 3
''',
expected_session_group_names=["group_1", "group_2", "group_3"],
expected_total_size=3)
self._verify_handler(
request='''
col_params: {
metric: { tag: 'optional_metric' }
filter_discrete: { values: { number_value: 33.0 } }
exclude_missing_values: true
}
start_index: 0
slice_size: 3
''',
expected_session_group_names=["group_1"],
expected_total_size=1)
def test_sort_one_column(self):
self._verify_handler(
request='''
col_params: {
metric: { tag: 'delta_temp' }
order: ORDER_ASC
}
start_index: 0
slice_size: 3
''',
expected_session_group_names=["group_3", "group_1", "group_2"],
expected_total_size=3)
self._verify_handler(
request='''
col_params: {
hparam: 'string_hparam'
order: ORDER_ASC
}
start_index: 0
slice_size: 3
''',
expected_session_group_names=["group_2", "group_1", "group_3"],
expected_total_size=3)
# Test descending order.
self._verify_handler(
request='''
col_params: {
hparam: 'string_hparam'
order: ORDER_DESC
}
start_index: 0
slice_size: 3
''',
expected_session_group_names=["group_3", "group_1", "group_2"],
expected_total_size=3)
def test_sort_multiple_columns(self):
self._verify_handler(
request='''
col_params: {
hparam: 'bool_hparam'
order: ORDER_ASC
}
col_params: {
metric: { tag: 'delta_temp' }
order: ORDER_ASC
}
start_index: 0
slice_size: 3
''',
expected_session_group_names=["group_2", "group_3", "group_1"],
expected_total_size=3)
    # Primary key in descending order. Secondary key in ascending order.
self._verify_handler(
request='''
col_params: {
hparam: 'bool_hparam'
order: ORDER_DESC
}
col_params: {
metric: { tag: 'delta_temp' }
order: ORDER_ASC
}
start_index: 0
slice_size: 3
''',
expected_session_group_names=["group_3", "group_1", "group_2"],
expected_total_size=3)
def test_sort_one_column_with_missing_values(self):
self._verify_handler(
request='''
col_params: {
metric: { tag: 'optional_metric' }
order: ORDER_ASC
missing_values_first: false
}
start_index: 0
slice_size: 3
''',
expected_session_group_names=["group_1", "group_2", "group_3"],
expected_total_size=3)
self._verify_handler(
request='''
col_params: {
metric: { tag: 'optional_metric' }
order: ORDER_ASC
missing_values_first: true
}
start_index: 0
slice_size: 3
''',
expected_session_group_names=["group_2", "group_3", "group_1"],
expected_total_size=3)
self._verify_handler(
request='''
col_params: {
hparam: 'optional_string_hparam'
order: ORDER_ASC
missing_values_first: false
}
start_index: 0
slice_size: 3
''',
expected_session_group_names=["group_3", "group_1", "group_2"],
expected_total_size=3)
self._verify_handler(
request='''
col_params: {
hparam: 'optional_string_hparam'
order: ORDER_ASC
missing_values_first: true
}
start_index: 0
slice_size: 3
''',
expected_session_group_names=["group_1", "group_2", "group_3"],
expected_total_size=3)
def _verify_full_response(self, request, expected_response):
request_proto = api_pb2.ListSessionGroupsRequest()
text_format.Merge(request, request_proto)
handler = list_session_groups.Handler(
backend_context.Context(self._mock_tb_context),
request_proto)
response = handler.run()
self.assertProtoEquals(expected_response, response)
def _verify_handler(
self, request, expected_session_group_names, expected_total_size):
request_proto = api_pb2.ListSessionGroupsRequest()
text_format.Merge(request, request_proto)
handler = list_session_groups.Handler(
backend_context.Context(self._mock_tb_context),
request_proto)
response = handler.run()
self.assertEqual(expected_session_group_names,
[sg.name for sg in response.session_groups])
self.assertEqual(expected_total_size, response.total_size)
def _serialized_plugin_data(self, data_oneof_field, text_protobuffer):
oneof_type_dict = {
DATA_TYPE_EXPERIMENT : api_pb2.Experiment,
DATA_TYPE_SESSION_START_INFO : plugin_data_pb2.SessionStartInfo,
DATA_TYPE_SESSION_END_INFO : plugin_data_pb2.SessionEndInfo
}
protobuffer = text_format.Merge(text_protobuffer,
oneof_type_dict[data_oneof_field]())
plugin_data = plugin_data_pb2.HParamsPluginData()
getattr(plugin_data, data_oneof_field).CopyFrom(protobuffer)
return metadata.create_summary_metadata(plugin_data).plugin_data.content
if __name__ == '__main__':
tf.test.main()
|
qiuminxu/tensorboard
|
tensorboard/plugins/hparams/list_session_groups_test.py
|
Python
|
apache-2.0
| 22,679
|
from flask import Flask, jsonify, abort, request
from goshna import *
from goshna import ApiFunctions
class Airport:
def __init__(self, id, airport_short, airport_full):
self.id = id
self.airport_short = airport_short
self.airport_full = airport_full
def to_json(self):
return {
'id': self.id,
'airport_short': self.airport_short,
'airport_full': self.airport_full
}
@app.route('/goshna/api/airports', methods=['GET'])
def get_airports():
airports = []
results = ApiFunctions.query_db("SELECT * FROM airports")
for row in results:
airport = Airport(row['id'], row['airport_short'], row['airport_full'])
airports.append(airport.to_json())
return jsonify({'airports': airports})
@app.route('/goshna/api/airports/<int:airport_id>', methods=['GET'])
def get_airport(airport_id):
row = ApiFunctions.query_db("SELECT * FROM airports WHERE id = ?", [airport_id], one=True)
if row is None:
abort(404)
airport = Airport(row['id'], row['airport_short'], row['airport_full'])
return jsonify({'airport': airport.to_json()})
@app.route('/goshna/api/airports', methods=['POST'])
def create_airport():
if not request.json or not 'airport_short' in request.json or not 'airport_full' in request.json:
abort(400)
airport_short = request.json['airport_short']
airport_full = request.json['airport_full']
    result = ApiFunctions.post_db("INSERT INTO airports VALUES (NULL, ?, ?)", [airport_short, airport_full])
    inserted_id = result.lastrowid  # assuming post_db returns the cursor; the original referenced an undefined 'c'
print u'Inserted new airport at row ' + str(inserted_id)
return jsonify({'id': str(inserted_id)}), 201
@app.route('/goshna/api/airports/<int:airport_id>', methods=['DELETE'])
def delete_airport(airport_id):
ApiFunctions.post_db("DELETE FROM airports WHERE id=?", [airport_id])
    print u'Deleted airport with ID ' + str(airport_id)
return jsonify({'result': True})
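# Illustrative requests (added commentary, not in the original module);
# host and port are hypothetical:
#
#     curl http://localhost:5000/goshna/api/airports
#     curl -X POST http://localhost:5000/goshna/api/airports \
#          -H 'Content-Type: application/json' \
#          -d '{"airport_short": "LHR", "airport_full": "London Heathrow"}'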
|
sumiquitous/Goshna-Server
|
goshna/Airport.py
|
Python
|
mit
| 2,080
|
#! /usr/bin/env python
# -*- coding: UTF8 -*-
import os
import sqlite3
import sys
import datetime
import math
def stat_moyenne(ech):
if len(ech) == 0:
return 0
return sum(ech) / float(len(ech))
def stat_variance(ech):
    if len(ech) < 2:
        # the sample variance is undefined for fewer than two values
        return 0
    t1 = 1. / (len(ech) - 1)
moy = stat_moyenne(ech)
t2 = 0.
for i in ech:
add = (i - moy) * (i - moy)
t2 += add
return t1 * t2
def stat_ecart_type(ech):
if len(ech) == 0:
return 0
variance = stat_variance(ech)
return math.sqrt(variance)
def stat_coeffvar(ech):
if len(ech) == 0:
return 0
return stat_ecart_type(ech) / stat_moyenne(ech)
def stat_mediane(ech):
if len(ech) == 0:
return 0
ech.sort()
if len(ech) / 2 == len(ech) / 2.:
n1 = float(ech[(len(ech) / 2) - 1])
n2 = float(ech[(len(ech) / 2)])
return (n1 + n2) / 2
else:
return ech[(len(ech)) / 2]
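# Quick sanity checks for the helpers above (illustrative only, not part of
# the original script; Python 2 semantics):
#
#     >>> stat_moyenne([1, 2, 3, 4])
#     2.5
#     >>> stat_mediane([1, 2, 3, 4])   # even length: mean of the middle pair
#     2.5
#     >>> stat_mediane([1, 2, 3])      # odd length: the middle element
#     2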
class Logger(object):
def __init__(self, filename="Default.log"):
self.terminal = sys.stdout
self.log = open(filename, "a")
def write(self, message):
self.terminal.write(message)
self.log.write(message)
os.system("mkdir -p ./logs")
logfile = str("./logs/Voskload_" + str(datetime.datetime.now().strftime("%d-%m-%y")) + "_log_preload.txt")
sys.stdout = Logger(logfile)
os.system('rm -rf ./db')
os.system('rm -rf ./data_input/db.fasta')
os.system('rm -rf ./results')
os.system('mkdir ./db')
os.system('mkdir ./results')
print """
.
/|\ .
/ | \ ./|\,
,-' \|/ `-. <-=O=->
<'--==<O>==--`> '\|/`
`-. /|\ ,-' '
\ | /
\|/
'
_ __ __ __ __
| | / /___ _____/ /__/ /___ ____ _____/ /
| | / / __ \/ ___/ //_/ / __ \/ __ `/ __ /
| |/ / /_/ (__ ) ,< / / /_/ / /_/ / /_/ /
|___/\____/____/_/|_/_/\____/\__,_/\__,_/
¤ Voskload, Voskhod Preloader
Version 20170721
Voskhod Pipeline version V1.2
Part of the Voskhod project
https://github.com/egeeamu/voskhod
(GPL-3.0)
Arnaud Ungaro contact@arnaud-ungaro.fr
"""
conn = sqlite3.connect("./data_input/cdna_infos.db")
c = conn.cursor()
c.execute('''PRAGMA synchronous = OFF''')
c.execute('''PRAGMA journal_mode = OFF''')
c.execute('''PRAGMA cache_size = 4096''')
conn.commit()
#dico_cdnainfo = OrderedDict()
c.execute("""SELECT DISTINCT transcript_id,gene_id,gene_name,species,transcript_sequence FROM RESULT""")
conn.commit()
match = c.fetchone()
fasta = str("./data_input/db.fasta")
fastaw = open(fasta, "a+b")
listsizes = []
print "Reading cdna_info.db.. "
while match is not None:
Ensdart = str(match[0])
Ensdarg = str(match[1])
Gene_name = str(match[2])
Specie = str(match[3])
Seq = str(match[4])
#dico_cdnainfo[Ensdart] = {"Ensdart": Ensdart, "Ensdarg": Ensdarg, "Gene_name": Gene_name, "Specie": Specie}
#npres = Seq.upper().count("N")
npres = 0
if npres < 4:
fastaw.write(">" + Ensdart + "\n" + Seq + "\n")
if npres > 3:
print "Too many N in Transcript (" + str(npres) + ") : " + str(Ensdart) + " " + str(Seq)
listsizes.append(len(Seq))
match = c.fetchone()
fastaw.close()
print "Formating blast database.."
os.system('../../bin/ncbi-blast/bin/makeblastdb -dbtype nucl -in ./data_input/db.fasta -out ./db/db -title Voskhod_DB')
print "Formating blast database ok.. \n"
os.system('rm -rf ./data_input/db.fasta')
print "Mean size: " + str(int(stat_moyenne(listsizes)))
print "Median size: " + str(int(stat_mediane(listsizes)))
print "Coefficient of variation: " + str(round(stat_coeffvar(listsizes), 3))
print "Max size: " + str(int(max(listsizes)))
print "Min size: " + str(int(min(listsizes)))
print "Megabases: " + str(round(sum(listsizes)/1000000., 3))
|
egeeamu/voskhod
|
bin/voskload_preload_database_validate_contigs.py
|
Python
|
gpl-3.0
| 3,881
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# pylint: disable=no-else-return, too-many-instance-attributes
from datetime import datetime, timedelta
from glob import glob
import imp
import os
import re
import sys
from flask import g, current_app
class HookError(Exception):
pass
class Dispatcher:
def __init__(self, app=None, plugins_dirs=None):
self.logger = None
self.hooks = {}
self.docs = {}
self.summaries = {}
self.help_all = ""
self.moto = ""
self.admins = []
self.plugins_dirs = []
self._maintain_to = datetime(year=1970, month=1, day=1)
self._maintain_by = None
if app:
self.init_app(app, plugins_dirs)
def init_app(self, app, plugins_dirs=None):
if plugins_dirs:
self.plugins_dirs = plugins_dirs
self.logger = app.logger
try:
self.plugins_dirs.append(app.config["EXTRA_PLUGINS_DIR"])
except KeyError:
pass
self.plugins_dirs.append(
os.path.abspath(os.path.join(os.path.dirname(__file__), "plugins"))
)
self.plugins_dirs.reverse() # plugins priority: built-in > fs > git
self.admins = app.config.get("ADMINS")
self.moto = app.config.get("MOTO")
app.extensions["dispatcher"] = self
self.load_hooks()
def add_hook(self, modname, command, func, hooks=None):
"""
add a hook to hook registry
:param modname: module from which the hook is loaded
:param command: hook name
:param func: hook function
:return: the actual registered command
"""
# add mod name to avoid command name clashes
hooks = hooks if hooks is not None else self.hooks
if command in hooks:
command = "%s-%s" % (modname, command)
command = command.replace("_", "-")
hooks[command.lower()] = func
if func.__doc__:
doc = func.__doc__.strip("\n ")
self.summaries[command] = doc.split("\n")[0]
self.docs[command] = doc
return command
def load_hooks(self):
hooks = {
"help": self._on_help,
"maintain": self._on_maintain,
}
for directory in self.plugins_dirs:
self.load_plugins(hooks, directory)
self.hooks = hooks
self._gen_help()
def load_plugins(self, hooks, plugin_dir, silent=True):
if not os.path.isdir(plugin_dir):
if not silent:
raise RuntimeError(
'"%s" is not a directory or does not exist' % plugin_dir
)
return
self.logger.info("loading plugins from %s", plugin_dir)
sys.path.insert(0, plugin_dir)
for plugin in glob(os.path.join(plugin_dir, "[!_]*.py")):
self.logger.info("loading plugin: %s", plugin)
plugin = os.path.basename(plugin)
modname = plugin[:-3]
try:
file, pathname, desc = imp.find_module(modname, [plugin_dir])
try:
mod = imp.load_module(modname, file, pathname, desc)
finally:
if file:
file.close()
for command in re.findall(r"on_(\w+)", " ".join(dir(mod))):
hookfun = getattr(mod, "on_" + command)
command = self.add_hook(modname, command, hookfun, hooks)
self.logger.info(
"attaching %s - %s to %s", plugin, hookfun, command
)
except:
# load time error
self.logger.exception(
"import failed on module %s, module not loaded", plugin
)
raise
self.logger.info("pluggings loaded from %s", plugin_dir)
def _gen_help(self):
docs = [self.moto] if self.moto else []
docs.extend(
"*%s* %s" % (command, doc) for command, doc in self.summaries.items()
)
self.help_all = "\n".join(docs)
def _on_help(self, which=None):
if which:
try:
return "*%s* %s" % (which, self.docs[which])
except KeyError:
return "No such command"
else:
return self.help_all
def _on_maintain(self, duration=3600):
        if g.user not in current_app.config["ADMINS"]:
raise HookError("%s is not allowed to call maintain" % g.user)
try:
duration = float(duration)
except ValueError:
return (
"duration must be the number of seconds of the "
+ 'maintenance period; got "%s"' % duration
)
self._maintain_to = datetime.now() + timedelta(seconds=duration)
self._maintain_by = g.user
return (
"I'm now in maintenance mode and will automatically "
"be reset to normal at %s" % self._maintain_to.strftime("%Y-%m-%d %H:%M:%S")
)
def dispatch(self, command, args=None):
if command != "maintain" and datetime.now() < self._maintain_to:
# in maintenance mode
return (
"I'm currently under maintenance by %s and will be "
"available at %s"
% (self._maintain_by, self._maintain_to.strftime("%Y-%m-%d %H:%M:%S"))
)
try:
hook = self.hooks[command]
except KeyError:
return "No such command: %s" % command
try:
if args:
return hook(args)
else:
return hook()
except (Exception, SystemExit) as e:
self.logger.exception("error running command %s", command)
raise HookError("Error running command %s: %s" % (command, e))
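# Illustrative plugin sketch (a hypothetical my_plugin.py dropped into one of
# the plugin directories; everything below is made up). load_plugins() picks
# up any module-level function named on_<command>, and the docstring's first
# line becomes the command's help summary:
#
#     def on_echo(args=''):
#         """echo the given arguments back to the caller"""
#         return args or 'nothing to echo'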
|
tryagainconcepts/debot
|
debot/dispatcher.py
|
Python
|
mit
| 5,882
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Classes for building a user correlation matrix and, from it, finding users
# similar to a given user among those who rated a particular movie.
import numpy as np
import pandas as pd
class Correlation:
"""
"""
def pearson(self, rating_matrix):
return pd.DataFrame(rating_matrix.T).corr().as_matrix()
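# Illustrative usage (added note, not in the original module); the ratings
# are made-up numbers, rows = users, columns = movies:
#
#     >>> ratings = np.array([[5., 3., 4.], [4., 3., 5.], [1., 5., 1.]])
#     >>> Correlation().pearson(ratings).shape   # user-by-user similarities
#     (3, 3)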
|
sagnik17/Movie-Recommendation-System
|
mrs/recsys/cf.py
|
Python
|
gpl-3.0
| 387
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013 Midokura PTE LTD.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from midonetclient import port_group_port
from midonetclient import resource_base
from midonetclient import vendor_media_type
from midonetclient.vendor_media_type import APPLICATION_PORTGROUP_PORT_COLLECTION_JSON
class PortGroup(resource_base.ResourceBase):
media_type = vendor_media_type.APPLICATION_PORTGROUP_JSON
def __init__(self, uri, dto, auth):
super(PortGroup, self).__init__(uri, dto, auth)
def name(self, name):
self.dto['name'] = name
return self
def tenant_id(self, tenant_id):
self.dto['tenantId'] = tenant_id
return self
def get_name(self):
return self.dto['name']
def get_id(self):
return self.dto['id']
def get_tenant_id(self):
return self.dto['tenantId']
def is_stateful(self):
return self.dto['stateful']
def stateful(self, v):
self.dto['stateful'] = v
return self
def get_ports(self, query=None):
headers = {'Accept': APPLICATION_PORTGROUP_PORT_COLLECTION_JSON}
return self.get_children(self.dto['ports'], query, headers,
port_group_port.PortGroupPort)
def add_port_group_port(self):
return port_group_port.PortGroupPort(self.dto['ports'], {}, self.auth)
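# Illustrative usage (added note, not in the original file); the setters
# return self, so configuration calls chain:
#
#     pg = PortGroup(uri, dto, auth).name('web-ports').tenant_id('tenant-1')
#     for pgp in pg.get_ports():
#         ...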
|
celebdor/python-midonetclient
|
src/midonetclient/port_group.py
|
Python
|
apache-2.0
| 1,908
|
#!/usr/bin/env python
#
# Copyright 2011 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Posix implementations of platform-specific functionality."""
import fcntl
import os
from tornado.platform import interface
from tornado.util import b
def set_close_exec(fd):
flags = fcntl.fcntl(fd, fcntl.F_GETFD)
fcntl.fcntl(fd, fcntl.F_SETFD, flags | fcntl.FD_CLOEXEC)
def _set_nonblocking(fd):
flags = fcntl.fcntl(fd, fcntl.F_GETFL)
fcntl.fcntl(fd, fcntl.F_SETFL, flags | os.O_NONBLOCK)
class Waker(interface.Waker):
def __init__(self):
r, w = os.pipe()
_set_nonblocking(r)
_set_nonblocking(w)
set_close_exec(r)
set_close_exec(w)
self.reader = os.fdopen(r, "rb", 0)
self.writer = os.fdopen(w, "wb", 0)
def fileno(self):
return self.reader.fileno()
def wake(self):
try:
self.writer.write(b("x"))
except IOError:
pass
def consume(self):
try:
while True:
result = self.reader.read()
                if not result:
                    break
except IOError:
pass
def close(self):
self.reader.close()
self.writer.close()
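# Note (added commentary, not in the original file): Waker implements the
# classic "self-pipe trick" -- the IOLoop select()s on the read end of the
# pipe, wake() writes a byte to the write end from another thread to
# interrupt the poll, and consume() drains the pipe so spurious wakeups
# don't accumulate.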
|
jordoncm/ft2json
|
tornado/platform/posix.py
|
Python
|
apache-2.0
| 1,723
|
import os
import shutil
import sys
dest = 'py2only'
futures_dirname = 'concurrent'
DIST_DIR = os.path.realpath('kolibri/dist')
def hide_py2_modules():
"""
Move the directory of 'futures' and python2-only modules of 'future'
inside the directory 'py2only'
"""
# Move the directory of 'futures' inside the directory 'py2only'
_move_modules_to_py2only(futures_dirname)
# Future's submodules are not downloaded in Python 3 but only in Python 2
if sys.version_info[0] == 2:
from future.standard_library import TOP_LEVEL_MODULES
for module in TOP_LEVEL_MODULES:
if module == 'test':
continue
# Move the directory of submodules of 'future' inside 'py2only'
_move_modules_to_py2only(module)
def _move_modules_to_py2only(module_name):
module_src_path = os.path.join(DIST_DIR, module_name)
module_dst_path = os.path.join(DIST_DIR, dest, module_name)
shutil.move(module_src_path, module_dst_path)
if __name__ == '__main__':
# Temporarily add `kolibri/dist` to PYTHONPATH to import future
sys.path = sys.path + [os.path.realpath(os.path.join(DIST_DIR))]
try:
os.makedirs(os.path.join(DIST_DIR, dest))
except OSError:
raise
hide_py2_modules()
# Remove `kolibri/dist` from PYTHONPATH
sys.path = sys.path[:-1]
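    # Resulting layout sketch (added note, not in the original script): after
    # running this, kolibri/dist/concurrent (the 'futures' backport) and,
    # under Python 2, future's top-level shim packages all live in
    # kolibri/dist/py2only/, keeping them off the default Python 3 import path.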
|
christianmemije/kolibri
|
build_tools/py2only.py
|
Python
|
mit
| 1,365
|
'''
Created on Oct 22, 2012
@author: Gary
'''
from housemonitor.hm.inputthead import InputThread
from housemonitor.hm.display import Display
from time import sleep
if __name__ == '__main__':
cv = {}
it = InputThread( cv )
it.start()
sleep( 15 )
display = Display( cv )
display.update()
display.run()
|
gary-pickens/HouseMonitor
|
housemonitor/hm/hmon.py
|
Python
|
mit
| 334
|
def generate_dispatcher(method_handler, parent_class=None):
"""
Create a dispatcher class and return an instance of it from a dispatcher
definition.
The definition is a class with the following attributes:
    _ EXPORTED_METHODS: dictionary where keys are method names and values
class attribute names of the attributes holding references to an object
implementing the method
_ attributes defined in EXPORTED_METHODS values. They must contain an
object instance which implements the respective methods (EXPORTED_METHODS
keys)
Ex:
class TestDispatchHandler:
EXPORTED_METHODS = {'method1': 'attr1',
'method2': 'attr1',
'method3': 'attr2'}
attr1 = Object1()
attr2 = Object2()
where Object1 is a class which provides method1 and method2 and Object2 a
class which provides method3
obj_inst = generate_dispatcher(TestDispatchHandler)
    will store in 'obj_inst' a class instance which provides method1, method2
    and method3 by delegating each call to the correct object
"""
# class definition
if parent_class:
class_str = 'class Dispatcher(%s):\n' % parent_class
statements = ' %s.__init__(self)\n' % parent_class
else:
class_str = 'class Dispatcher:\n'
statements = ''
# methods definition
registered = []
for method, objname in method_handler.EXPORTED_METHODS.items():
if not objname in registered:
registered.append(objname)
class_str = '%s def %s(self, *attrs):\n return self.%s.%s(*attrs)\n'%\
(class_str, method, objname, method)
# constructor definition
attrs = ''
for objname in registered:
attrs = '%s, %s' % (attrs, objname)
statements = '%s self.%s=%s\n' % (statements, objname, objname)
# retrieve object reference in current context
exec '%s=getattr(method_handler, "%s")'%(objname, objname)
# assemble all parts
class_str = '%s def __init__(self%s):\n%s' % (class_str, attrs, statements)
# now we can eval the full class
exec class_str
# return an instance of constructed class
return eval('Dispatcher(%s)'%attrs[2:]) # attrs[2:] for removing ', '
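# Runnable demo of the recipe (illustrative only, not part of the original;
# the classes below are made up; Python 2 syntax to match the module):
#
#     class Obj1:
#         def method1(self): return 'm1'
#         def method2(self): return 'm2'
#     class Obj2:
#         def method3(self): return 'm3'
#     class Handler:
#         EXPORTED_METHODS = {'method1': 'attr1',
#                             'method2': 'attr1',
#                             'method3': 'attr2'}
#         attr1 = Obj1()
#         attr2 = Obj2()
#     dispatcher = generate_dispatcher(Handler)
#     print dispatcher.method3()   # delegates to Obj2 -> 'm3'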
|
ActiveState/code
|
recipes/Python/83048_Dynamic_generatidispatcher/recipe-83048.py
|
Python
|
mit
| 2,283
|
def is_funny(s):
codes = [ord(c) for c in s]
codes_len = len(codes)
for i in range(codes_len - 1):
d1 = codes[i + 1] - codes[i]
d2 = codes[-(i + 1)] - codes[-(i + 2)]
if abs(d1) != abs(d2):
return False
return True
t = int(input())
for _ in range(t):
print('Funny' if is_funny(input()) else 'Not Funny')
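# Worked example (added note, not in the original solution): for s = 'acxz'
# the forward deltas d1 are [2, 21, 2] and the reverse-direction deltas d2
# (computed from the end of the string) are also [2, 21, 2]; every pair of
# absolute values matches, so the string is "Funny".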
|
ahavrylyuk/hackerrank
|
python3/funny-string.py
|
Python
|
mit
| 361
|
# -*- coding: utf-8 -*-
from django.conf.urls import url
from djshop.apps.store.views import index
from djshop.apps.store.views import products
urlpatterns = [
url(r'^index/?$', index.index, name="index"),
# Products
url(r'^products/?$', products.index, name="view_products"),
url(r'^products/new/?$', products.new, name="new_product"),
url(r'^products/(?P<product_id>\d+)/?$', products.view, name="view_product"),
url(r'^products/(?P<product_id>\d+)/edit/?$', products.edit, name="edit_product"),
url(r'^products/(?P<product_id>\d+)/delete/?$', products.delete, name="delete_product"),
]
|
diegojromerolopez/djshop
|
src/djshop/apps/store/urls.py
|
Python
|
mit
| 622
|
from __future__ import print_function, absolute_import
import os
import sys
import imp
import json
import string
import shutil
import subprocess
import tempfile
from distutils.dep_util import newer_group
from distutils.core import Extension
from distutils.errors import DistutilsExecError
from distutils.ccompiler import new_compiler
from distutils.sysconfig import customize_compiler, get_config_vars
from distutils.command.build_ext import build_ext as _build_ext
def find_packages():
"""Find all of mdtraj's python packages.
Adapted from IPython's setupbase.py. Copyright IPython
contributors, licensed under the BSD license.
"""
packages = ['mdtraj.scripts']
for dir,subdirs,files in os.walk('MDTraj'):
package = dir.replace(os.path.sep, '.')
if '__init__.py' not in files:
# not a package
continue
packages.append(package.replace('MDTraj', 'mdtraj'))
return packages
def check_dependencies(dependencies):
def module_exists(dep):
try:
imp.find_module(dep)
return True
except ImportError:
return False
for dep in dependencies:
if len(dep) == 1:
import_name, pkg_name = dep[0], dep[0]
elif len(dep) == 2:
import_name, pkg_name = dep
else:
raise ValueError(dep)
if not module_exists(import_name):
lines = [
'-' * 50,
'Warning: This package requires %r. Try' % import_name,
'',
' $ conda install %s' % pkg_name,
'',
'or:',
'',
' $ pip install %s' % pkg_name,
'-' * 50,
]
print(os.linesep.join(lines), file=sys.stderr)
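# Illustrative call (editor's sketch; the package names are examples only):
# each entry is a 1-tuple when the import name matches the package name, or a
# 2-tuple (import_name, package_name) when the two differ, e.g.
#   check_dependencies([('numpy',), ('yaml', 'pyyaml'), ('tables', 'pytables')])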
################################################################################
# Detection of compiler capabilities
################################################################################
class CompilerDetection(object):
# Necessary for OSX. See https://github.com/mdtraj/mdtraj/issues/576
# The problem is that distutils.sysconfig.customize_compiler()
# is necessary to properly invoke the correct compiler for this class
# (otherwise the CC env variable isn't respected). Unfortunately,
# distutils.sysconfig.customize_compiler() DIES on OSX unless some
# appropriate initialization routines have been called. This line
    # has a side effect of calling those initialization routines, and is therefore
# necessary for OSX, even though we don't use the result.
_DONT_REMOVE_ME = get_config_vars()
def __init__(self, disable_openmp):
cc = new_compiler()
customize_compiler(cc)
self.msvc = cc.compiler_type == 'msvc'
self._print_compiler_version(cc)
if disable_openmp:
self.openmp_enabled = False
else:
self.openmp_enabled, openmp_needs_gomp = self._detect_openmp()
self.sse3_enabled = self._detect_sse3() if not self.msvc else True
self.sse41_enabled = self._detect_sse41() if not self.msvc else True
self.compiler_args_sse2 = ['-msse2'] if not self.msvc else ['/arch:SSE2']
self.compiler_args_sse3 = ['-mssse3'] if (self.sse3_enabled and not self.msvc) else []
self.compiler_args_sse41, self.define_macros_sse41 = [], []
if self.sse41_enabled:
self.define_macros_sse41 = [('__SSE4__', 1), ('__SSE4_1__', 1)]
if not self.msvc:
self.compiler_args_sse41 = ['-msse4']
if self.openmp_enabled:
self.compiler_libraries_openmp = []
if self.msvc:
self.compiler_args_openmp = ['/openmp']
else:
self.compiler_args_openmp = ['-fopenmp']
if openmp_needs_gomp:
self.compiler_libraries_openmp = ['gomp']
else:
self.compiler_libraries_openmp = []
self.compiler_args_openmp = []
if self.msvc:
self.compiler_args_opt = ['/O2']
else:
self.compiler_args_opt = ['-O3', '-funroll-loops']
print()
def _print_compiler_version(self, cc):
print("C compiler:")
try:
if self.msvc:
if not cc.initialized:
cc.initialize()
cc.spawn([cc.cc])
else:
cc.spawn([cc.compiler[0]] + ['-v'])
except DistutilsExecError:
pass
def hasfunction(self, funcname, include=None, libraries=None, extra_postargs=None):
# running in a separate subshell lets us prevent unwanted stdout/stderr
part1 = '''
from __future__ import print_function
import os
import json
from distutils.ccompiler import new_compiler
from distutils.sysconfig import customize_compiler, get_config_vars
FUNCNAME = json.loads('%(funcname)s')
INCLUDE = json.loads('%(include)s')
LIBRARIES = json.loads('%(libraries)s')
EXTRA_POSTARGS = json.loads('%(extra_postargs)s')
''' % {
'funcname': json.dumps(funcname),
'include': json.dumps(include),
'libraries': json.dumps(libraries or []),
'extra_postargs': json.dumps(extra_postargs)}
part2 = '''
get_config_vars() # DON'T REMOVE ME
cc = new_compiler()
customize_compiler(cc)
for library in LIBRARIES:
cc.add_library(library)
status = 0
try:
with open('func.c', 'w') as f:
if INCLUDE is not None:
f.write('#include %s\\n' % INCLUDE)
f.write('int main(void) {\\n')
f.write(' %s;\\n' % FUNCNAME)
f.write('}\\n')
objects = cc.compile(['func.c'], output_dir='.',
extra_postargs=EXTRA_POSTARGS)
cc.link_executable(objects, 'a.out')
except Exception as e:
status = 1
exit(status)
'''
tmpdir = tempfile.mkdtemp(prefix='hasfunction-')
try:
curdir = os.path.abspath(os.curdir)
os.chdir(tmpdir)
with open('script.py', 'w') as f:
f.write(part1 + part2)
proc = subprocess.Popen(
[sys.executable, 'script.py'],
stderr=subprocess.PIPE, stdout=subprocess.PIPE)
proc.communicate()
status = proc.wait()
finally:
os.chdir(curdir)
shutil.rmtree(tmpdir)
return status == 0
def _print_support_start(self, feature):
print('Attempting to autodetect {0:6} support...'.format(feature), end=' ')
def _print_support_end(self, feature, status):
if status is True:
print('Compiler supports {0}'.format(feature))
else:
print('Did not detect {0} support'.format(feature))
def _detect_openmp(self):
self._print_support_start('OpenMP')
hasopenmp = self.hasfunction('omp_get_num_threads()', extra_postargs=['-fopenmp', '/openmp'])
needs_gomp = hasopenmp
if not hasopenmp:
hasopenmp = self.hasfunction('omp_get_num_threads()', libraries=['gomp'])
needs_gomp = hasopenmp
self._print_support_end('OpenMP', hasopenmp)
return hasopenmp, needs_gomp
def _detect_sse3(self):
"Does this compiler support SSE3 intrinsics?"
self._print_support_start('SSE3')
result = self.hasfunction('__m128 v; _mm_hadd_ps(v,v)',
include='<pmmintrin.h>',
extra_postargs=['-msse3'])
self._print_support_end('SSE3', result)
return result
def _detect_sse41(self):
"Does this compiler support SSE4.1 intrinsics?"
self._print_support_start('SSE4.1')
result = self.hasfunction( '__m128 v; _mm_round_ps(v,0x00)',
include='<smmintrin.h>',
extra_postargs=['-msse4'])
self._print_support_end('SSE4.1', result)
return result
################################################################################
# Writing version control information to the module
################################################################################
def git_version():
# Return the git revision as a string
# copied from numpy setup.py
def _minimal_ext_cmd(cmd):
# construct minimal environment
env = {}
for k in ['SYSTEMROOT', 'PATH']:
v = os.environ.get(k)
if v is not None:
env[k] = v
# LANGUAGE is used on win32
env['LANGUAGE'] = 'C'
env['LANG'] = 'C'
env['LC_ALL'] = 'C'
out = subprocess.Popen(
cmd, stdout=subprocess.PIPE, env=env).communicate()[0]
return out
try:
out = _minimal_ext_cmd(['git', 'rev-parse', 'HEAD'])
GIT_REVISION = out.strip().decode('ascii')
except OSError:
GIT_REVISION = 'Unknown'
return GIT_REVISION
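# Illustrative usage (editor's note): inside a git checkout this returns the
# full 40-character commit hash, so git_version()[:7] yields the short form
# appended to dev versions in write_version_py() below; it returns 'Unknown'
# when the git executable is unavailable.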
def write_version_py(VERSION, ISRELEASED, filename='MDTraj/version.py'):
cnt = """
# THIS FILE IS GENERATED FROM MDTRAJ SETUP.PY
short_version = '%(version)s'
version = '%(version)s'
full_version = '%(full_version)s'
git_revision = '%(git_revision)s'
release = %(isrelease)s
if not release:
version = full_version
"""
# Adding the git rev number needs to be done inside write_version_py(),
# otherwise the import of numpy.version messes up the build under Python 3.
FULLVERSION = VERSION
if os.path.exists('.git'):
GIT_REVISION = git_version()
else:
GIT_REVISION = 'Unknown'
if not ISRELEASED:
FULLVERSION += '.dev-' + GIT_REVISION[:7]
a = open(filename, 'w')
try:
a.write(cnt % {'version': VERSION,
'full_version': FULLVERSION,
'git_revision': GIT_REVISION,
'isrelease': str(ISRELEASED)})
finally:
a.close()
class StaticLibrary(Extension):
def __init__(self, *args, **kwargs):
self.export_include = kwargs.pop('export_include', [])
Extension.__init__(self, *args, **kwargs)
class build_ext(_build_ext):
def build_extension(self, ext):
if isinstance(ext, StaticLibrary):
self.build_static_extension(ext)
else:
_build_ext.build_extension(self, ext)
def build_static_extension(self, ext):
from distutils import log
sources = ext.sources
if sources is None or not isinstance(sources, (list, tuple)):
raise DistutilsSetupError(
("in 'ext_modules' option (extension '%s'), " +
"'sources' must be present and must be " +
"a list of source filenames") % ext.name)
sources = list(sources)
ext_path = self.get_ext_fullpath(ext.name)
depends = sources + ext.depends
if not (self.force or newer_group(depends, ext_path, 'newer')):
log.debug("skipping '%s' extension (up-to-date)", ext.name)
return
else:
log.info("building '%s' extension", ext.name)
extra_args = ext.extra_compile_args or []
macros = ext.define_macros[:]
for undef in ext.undef_macros:
macros.append((undef,))
objects = self.compiler.compile(sources,
output_dir=self.build_temp,
macros=macros,
include_dirs=ext.include_dirs,
debug=self.debug,
extra_postargs=extra_args,
depends=ext.depends)
self._built_objects = objects[:]
if ext.extra_objects:
objects.extend(ext.extra_objects)
extra_args = ext.extra_link_args or []
language = ext.language or self.compiler.detect_language(sources)
libname = os.path.splitext(os.path.basename(ext_path))[0]
output_dir = os.path.dirname(ext_path)
if (self.compiler.static_lib_format.startswith('lib') and
libname.startswith('lib')):
libname = libname[3:]
if not os.path.exists(output_dir):
# necessary for windows
os.makedirs(output_dir)
self.compiler.create_static_lib(objects,
output_libname=libname,
output_dir=output_dir,
target_lang=language)
for item in ext.export_include:
shutil.copy(item, output_dir)
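# Editor's sketch (not part of the original module; module path and file
# names are illustrative only): StaticLibrary is declared like a regular
# Extension, and the custom build_ext command above archives it into a static
# library instead of linking a shared object, e.g. in a setup() call:
#
#   setup(
#       ...,
#       ext_modules=[StaticLibrary('mdtraj.core.lib.theobald',
#                                  sources=['src/theobald_rmsd.c'],
#                                  export_include=['src/theobald_rmsd.h'])],
#       cmdclass={'build_ext': build_ext},
#   )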
|
rmcgibbo/msmbuilder
|
basesetup.py
|
Python
|
lgpl-2.1
| 12,526
|
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import unittest
import numpy as np
from op_test import OpTest
from test_softmax_op import stable_softmax
class TestSoftmaxWithCrossEntropyOp(OpTest):
"""
    Test softmax with cross entropy operator with discrete one-hot labels.
"""
def initParams(self):
self.numeric_stable_mode = False
def setUp(self):
self.initParams()
self.op_type = "softmax_with_cross_entropy"
batch_size = 41
class_num = 37
logits = np.random.uniform(0.1, 1.0,
[batch_size, class_num]).astype("float64")
softmax = np.apply_along_axis(stable_softmax, 1, logits)
labels = np.random.randint(0, class_num, [batch_size, 1], dtype="int64")
cross_entropy = np.asmatrix(
[[-np.log(softmax[i][labels[i][0]])]
for i in range(softmax.shape[0])],
dtype="float64")
self.inputs = {"Logits": logits, "Label": labels}
self.outputs = {
"Softmax": softmax.astype("float64"),
"Loss": cross_entropy.astype("float64")
}
self.attrs = {"numeric_stable_mode": self.numeric_stable_mode}
def test_check_output(self):
self.check_output()
def test_check_grad(self):
self.check_grad(["Logits"], "Loss")
class TestSoftmaxWithCrossEntropyOpNoCudnn(TestSoftmaxWithCrossEntropyOp):
def initParams(self):
self.numeric_stable_mode = True
class TestSoftmaxWithCrossEntropyOp2(OpTest):
"""
Test softmax with cross entropy operator with soft labels.
"""
def setUp(self):
self.op_type = "softmax_with_cross_entropy"
batch_size = 41
class_num = 37
logits = np.random.uniform(0.1, 1.0,
[batch_size, class_num]).astype("float64")
softmax = np.apply_along_axis(stable_softmax, 1, logits)
labels = np.random.uniform(0.1, 1.0,
[batch_size, class_num]).astype("float64")
labels /= np.sum(labels, axis=1, keepdims=True)
cross_entropy = (-labels * np.log(softmax)).sum(
axis=1, keepdims=True).astype("float64")
self.inputs = {"Logits": logits, "Label": labels}
self.outputs = {
"Softmax": softmax.astype("float64"),
"Loss": cross_entropy.astype("float64")
}
self.attrs = {"soft_label": True}
def test_check_output(self):
self.check_output()
def test_check_grad(self):
self.check_grad(["Logits"], "Loss")
class TestSoftmaxWithCrossEntropyOp3(OpTest):
"""
Test softmax with cross entropy operator with ignore_index.
"""
def initParams(self):
self.numeric_stable_mode = False
def setUp(self):
self.initParams()
self.op_type = "softmax_with_cross_entropy"
batch_size = 41
class_num = 37
logits = np.random.uniform(0.1, 1.0,
[batch_size, class_num]).astype("float64")
softmax = np.apply_along_axis(stable_softmax, 1, logits)
labels = np.random.randint(0, class_num, [batch_size, 1], dtype="int64")
ignore_index = 7
cross_entropy = np.asmatrix(
[[-np.log(softmax[i][labels[i][0]])]
if labels[i] != ignore_index else [0]
for i in range(softmax.shape[0])],
dtype="float64")
self.inputs = {"Logits": logits, "Label": labels}
self.outputs = {
"Softmax": softmax.astype("float64"),
"Loss": cross_entropy.astype("float64")
}
self.attrs = {
"ignore_index": ignore_index,
"numeric_stable_mode": self.numeric_stable_mode
}
def test_check_output(self):
self.check_output()
def test_check_grad(self):
self.check_grad(["Logits"], "Loss")
class TestSoftmaxWithCrossEntropyOp3NoCudnn(TestSoftmaxWithCrossEntropyOp3):
def initParams(self):
self.numeric_stable_mode = True
if __name__ == "__main__":
unittest.main()
|
reyoung/Paddle
|
python/paddle/fluid/tests/unittests/test_softmax_with_cross_entropy_op.py
|
Python
|
apache-2.0
| 4,712
|
from . import boxscore
from . import draft
from . import miscellaneous
from . import playbyplay
from . import player
from . import team
|
CardinalAdvising/py-Goldsberry
|
goldsberry/__init__.py
|
Python
|
mit
| 135
|
import pytest
from .fixtures import Adder, Divider
from ..brokers.standard import Standard as StandardBroker
from ..brokers.eager import Eager as EagerBroker
from ..brokers.base import Broker as BaseBroker
from ..connectors.dummy import Dummy as DummyConnector
class TestBrokerInterface(object):
def test_default_broker_interface(self):
with pytest.raises(NotImplementedError):
BaseBroker.add_job(StandardBroker(None), Adder, 'demo', {})
class TestStandardBroker(object):
@property
def connector(self):
return DummyConnector()
def test_broker_repr(self):
broker = StandardBroker(self.connector)
assert repr(broker) == 'Broker(Dummy)'
def test_add_job_to_broker(self):
broker = StandardBroker(self.connector)
broker.add_job(Adder, 2, 3)
queues = list(broker.connector.jobs.keys())
assert len(queues) == 1
assert queues[0] == 'sqjobs'
assert broker.connector.num_jobs == 1
messages = broker.connector.jobs['sqjobs']
assert len(messages) == 1
def test_right_payload_args_when_job_is_added(self):
broker = StandardBroker(self.connector)
broker.add_job(Adder, 2, 3)
message = broker.connector.jobs['sqjobs'][0]
        del message['id']  # raises KeyError if missing; the value itself doesn't matter
assert message == {'args': (2, 3), 'kwargs': {}, 'name': 'adder'}
def test_right_payload_kwargs_when_job_is_added(self):
broker = StandardBroker(self.connector)
broker.add_job(Adder, num2=2, num1=3)
message = broker.connector.jobs['sqjobs'][0]
        del message['id']  # raises KeyError if missing; the value itself doesn't matter
assert message == {'args': (), 'kwargs': {'num1': 3, 'num2': 2}, 'name': 'adder'}
def test_right_payload_both_when_job_is_added(self):
broker = StandardBroker(self.connector)
broker.add_job(Adder, 2, num2=3)
message = broker.connector.jobs['sqjobs'][0]
        del message['id']  # raises KeyError if missing; the value itself doesn't matter
assert message == {'args': (2,), 'kwargs': {'num2': 3}, 'name': 'adder'}
def test_multiple_jobs_are_stored_correctly_by_the_broker(self):
broker = StandardBroker(self.connector)
job_ids, jobs = [], []
job_ids.append(broker.add_job(Adder, 1, 1))
job_ids.append(broker.add_job(Adder, 2, 2))
gen = broker.jobs('sqjobs')
for _ in range(2):
jobs.append(next(gen))
assert len(jobs) == 2
assert jobs[0] == {'id': job_ids[1].job_id, 'args': (2, 2), 'kwargs': {}, 'name': 'adder'}
assert jobs[1] == {'id': job_ids[0].job_id, 'args': (1, 1), 'kwargs': {}, 'name': 'adder'}
class TestEagerBroker(object):
def test_broker_repr(self):
broker = EagerBroker()
assert repr(broker) == 'Broker(Eager)'
def test_execute_job_eager_mode(self):
broker = EagerBroker()
result = broker.add_job(Adder, 2, 3)
assert result.result == 5
def test_eager_failure(self):
broker = EagerBroker()
with pytest.raises(ZeroDivisionError):
assert broker.add_job(Divider, 2, 0)
|
gnufede/sqjobs
|
sqjobs/tests/broker_test.py
|
Python
|
bsd-3-clause
| 3,222
|
#!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Base class for RPC testing."""
from collections import deque
import errno
from enum import Enum
import http.client
import logging
import optparse
import os
import pdb
import shutil
import subprocess
import sys
import tempfile
import time
import traceback
from .authproxy import JSONRPCException
from . import coverage
from .util import (
MAX_NODES,
PortSeed,
assert_equal,
check_json_precision,
connect_nodes_bi,
disconnect_nodes,
get_rpc_proxy,
initialize_datadir,
get_datadir_path,
log_filename,
p2p_port,
rpc_url,
set_node_times,
sync_blocks,
sync_mempools,
)
class TestStatus(Enum):
PASSED = 1
FAILED = 2
SKIPPED = 3
TEST_EXIT_PASSED = 0
TEST_EXIT_FAILED = 1
TEST_EXIT_SKIPPED = 77
BITCOIND_PROC_WAIT_TIMEOUT = 60
class BitcoinTestFramework(object):
"""Base class for a bitcoin test script.
Individual bitcoin test scripts should subclass this class and override the following methods:
- __init__()
- add_options()
- setup_chain()
- setup_network()
- run_test()
The main() method should not be overridden.
This class also contains various public and private helper methods."""
# Methods to override in subclass test scripts.
def __init__(self):
self.num_nodes = 4
self.setup_clean_chain = False
self.nodes = []
self.bitcoind_processes = {}
self.mocktime = 0
def add_options(self, parser):
pass
def setup_chain(self):
self.log.info("Initializing test directory " + self.options.tmpdir)
if self.setup_clean_chain:
self._initialize_chain_clean(self.options.tmpdir, self.num_nodes)
else:
self._initialize_chain(self.options.tmpdir, self.num_nodes, self.options.cachedir)
def setup_network(self):
self.setup_nodes()
# Connect the nodes as a "chain". This allows us
# to split the network between nodes 1 and 2 to get
# two halves that can work on competing chains.
for i in range(self.num_nodes - 1):
connect_nodes_bi(self.nodes, i, i + 1)
self.sync_all()
def setup_nodes(self):
extra_args = None
if hasattr(self, "extra_args"):
extra_args = self.extra_args
self.nodes = self.start_nodes(self.num_nodes, self.options.tmpdir, extra_args)
def run_test(self):
raise NotImplementedError
# Main function. This should not be overridden by the subclass test scripts.
def main(self):
parser = optparse.OptionParser(usage="%prog [options]")
parser.add_option("--nocleanup", dest="nocleanup", default=False, action="store_true",
help="Leave bitcoinds and test.* datadir on exit or error")
parser.add_option("--noshutdown", dest="noshutdown", default=False, action="store_true",
help="Don't stop bitcoinds after the test execution")
parser.add_option("--srcdir", dest="srcdir", default=os.path.normpath(os.path.dirname(os.path.realpath(__file__)) + "/../../../src"),
help="Source directory containing bitcoind/bitcoin-cli (default: %default)")
parser.add_option("--cachedir", dest="cachedir", default=os.path.normpath(os.path.dirname(os.path.realpath(__file__)) + "/../../cache"),
help="Directory for caching pregenerated datadirs")
parser.add_option("--tmpdir", dest="tmpdir", help="Root directory for datadirs")
parser.add_option("-l", "--loglevel", dest="loglevel", default="INFO",
help="log events at this level and higher to the console. Can be set to DEBUG, INFO, WARNING, ERROR or CRITICAL. Passing --loglevel DEBUG will output all logs to console. Note that logs at all levels are always written to the test_framework.log file in the temporary test directory.")
parser.add_option("--tracerpc", dest="trace_rpc", default=False, action="store_true",
help="Print out all RPC calls as they are made")
parser.add_option("--portseed", dest="port_seed", default=os.getpid(), type='int',
help="The seed to use for assigning port numbers (default: current process id)")
parser.add_option("--coveragedir", dest="coveragedir",
help="Write tested RPC commands into this directory")
parser.add_option("--configfile", dest="configfile",
help="Location of the test framework config file")
parser.add_option("--pdbonfailure", dest="pdbonfailure", default=False, action="store_true",
help="Attach a python debugger if test fails")
self.add_options(parser)
(self.options, self.args) = parser.parse_args()
PortSeed.n = self.options.port_seed
os.environ['PATH'] = self.options.srcdir + ":" + self.options.srcdir + "/qt:" + os.environ['PATH']
check_json_precision()
# Set up temp directory and start logging
if self.options.tmpdir:
os.makedirs(self.options.tmpdir, exist_ok=False)
else:
self.options.tmpdir = tempfile.mkdtemp(prefix="test")
self._start_logging()
success = TestStatus.FAILED
try:
self.setup_chain()
self.setup_network()
self.run_test()
success = TestStatus.PASSED
except JSONRPCException as e:
self.log.exception("JSONRPC error")
except SkipTest as e:
self.log.warning("Test Skipped: %s" % e.message)
success = TestStatus.SKIPPED
except AssertionError as e:
self.log.exception("Assertion failed")
except KeyError as e:
self.log.exception("Key error")
except Exception as e:
self.log.exception("Unexpected exception caught during testing")
except KeyboardInterrupt as e:
self.log.warning("Exiting after keyboard interrupt")
if success == TestStatus.FAILED and self.options.pdbonfailure:
print("Testcase failed. Attaching python debugger. Enter ? for help")
pdb.set_trace()
if not self.options.noshutdown:
self.log.info("Stopping nodes")
if self.nodes:
self.stop_nodes()
else:
self.log.info("Note: bitcoinds were not stopped and may still be running")
if not self.options.nocleanup and not self.options.noshutdown and success != TestStatus.FAILED:
self.log.info("Cleaning up")
shutil.rmtree(self.options.tmpdir)
else:
self.log.warning("Not cleaning up dir %s" % self.options.tmpdir)
if os.getenv("PYTHON_DEBUG", ""):
# Dump the end of the debug logs, to aid in debugging rare
# travis failures.
import glob
filenames = [self.options.tmpdir + "/test_framework.log"]
filenames += glob.glob(self.options.tmpdir + "/node*/regtest/debug.log")
MAX_LINES_TO_PRINT = 1000
for fn in filenames:
try:
with open(fn, 'r') as f:
print("From", fn, ":")
print("".join(deque(f, MAX_LINES_TO_PRINT)))
except OSError:
print("Opening file %s failed." % fn)
traceback.print_exc()
if success == TestStatus.PASSED:
self.log.info("Tests successful")
sys.exit(TEST_EXIT_PASSED)
elif success == TestStatus.SKIPPED:
self.log.info("Test skipped")
sys.exit(TEST_EXIT_SKIPPED)
else:
self.log.error("Test failed. Test logging available at %s/test_framework.log", self.options.tmpdir)
logging.shutdown()
sys.exit(TEST_EXIT_FAILED)
# Public helper methods. These can be accessed by the subclass test scripts.
def start_node(self, i, dirname, extra_args=None, rpchost=None, timewait=None, binary=None, stderr=None):
"""Start a bitcoind and return RPC connection to it"""
datadir = os.path.join(dirname, "node" + str(i))
if binary is None:
binary = os.getenv("BITCOIND", "bgoldd")
args = [binary, "-datadir=" + datadir, "-server", "-keypool=1", "-discover=0", "-rest", "-logtimemicros", "-debug", "-debugexclude=libevent", "-debugexclude=leveldb", "-mocktime=" + str(self.mocktime), "-uacomment=testnode%d" % i]
if extra_args is not None:
args.extend(extra_args)
self.bitcoind_processes[i] = subprocess.Popen(args, stderr=stderr)
self.log.debug("initialize_chain: bitcoind started, waiting for RPC to come up")
self._wait_for_bitcoind_start(self.bitcoind_processes[i], datadir, i, rpchost)
self.log.debug("initialize_chain: RPC successfully started")
proxy = get_rpc_proxy(rpc_url(datadir, i, rpchost), i, timeout=timewait)
if self.options.coveragedir:
coverage.write_all_rpc_commands(self.options.coveragedir, proxy)
return proxy
def start_nodes(self, num_nodes, dirname, extra_args=None, rpchost=None, timewait=None, binary=None):
"""Start multiple bitcoinds, return RPC connections to them"""
if extra_args is None:
extra_args = [None] * num_nodes
if binary is None:
binary = [None] * num_nodes
assert_equal(len(extra_args), num_nodes)
assert_equal(len(binary), num_nodes)
rpcs = []
try:
for i in range(num_nodes):
rpcs.append(self.start_node(i, dirname, extra_args[i], rpchost, timewait=timewait, binary=binary[i]))
except:
# If one node failed to start, stop the others
# TODO: abusing self.nodes in this way is a little hacky.
# Eventually we should do a better job of tracking nodes
self.nodes.extend(rpcs)
self.stop_nodes()
self.nodes = []
raise
return rpcs
def stop_node(self, i):
"""Stop a bitcoind test node"""
self.log.debug("Stopping node %d" % i)
try:
self.nodes[i].stop()
except http.client.CannotSendRequest as e:
self.log.exception("Unable to stop node")
return_code = self.bitcoind_processes[i].wait(timeout=BITCOIND_PROC_WAIT_TIMEOUT)
del self.bitcoind_processes[i]
assert_equal(return_code, 0)
def stop_nodes(self):
"""Stop multiple bitcoind test nodes"""
for i in range(len(self.nodes)):
self.stop_node(i)
assert not self.bitcoind_processes.values() # All connections must be gone now
def assert_start_raises_init_error(self, i, dirname, extra_args=None, expected_msg=None):
with tempfile.SpooledTemporaryFile(max_size=2**16) as log_stderr:
try:
self.start_node(i, dirname, extra_args, stderr=log_stderr)
self.stop_node(i)
except Exception as e:
                assert 'bitcoind exited' in str(e)  # node must have shut down
if expected_msg is not None:
log_stderr.seek(0)
stderr = log_stderr.read().decode('utf-8')
if expected_msg not in stderr:
raise AssertionError("Expected error \"" + expected_msg + "\" not found in:\n" + stderr)
else:
if expected_msg is None:
assert_msg = "bitcoind should have exited with an error"
else:
assert_msg = "bitcoind should have exited with expected error " + expected_msg
raise AssertionError(assert_msg)
def wait_for_node_exit(self, i, timeout):
self.bitcoind_processes[i].wait(timeout)
def split_network(self):
"""
Split the network of four nodes into nodes 0/1 and 2/3.
"""
disconnect_nodes(self.nodes[1], 2)
disconnect_nodes(self.nodes[2], 1)
self.sync_all([self.nodes[:2], self.nodes[2:]])
def join_network(self):
"""
Join the (previously split) network halves together.
"""
connect_nodes_bi(self.nodes, 1, 2)
self.sync_all()
def sync_all(self, node_groups=None):
if not node_groups:
node_groups = [self.nodes]
for group in node_groups:
sync_blocks(group)
sync_mempools(group)
def enable_mocktime(self):
"""Enable mocktime for the script.
mocktime may be needed for scripts that use the cached version of the
blockchain. If the cached version of the blockchain is used without
mocktime then the mempools will not sync due to IBD.
        For backward compatibility of the python scripts with previous
        versions of the cache, this helper function sets mocktime to Jan 1,
        2014 (timestamp 1388534400) plus 201 * 10 * 60 seconds, i.e. 201
        blocks at ten-minute spacing."""
self.mocktime = 1388534400 + (201 * 10 * 60)
def disable_mocktime(self):
self.mocktime = 0
# Private helper methods. These should not be accessed by the subclass test scripts.
def _start_logging(self):
# Add logger and logging handlers
self.log = logging.getLogger('TestFramework')
self.log.setLevel(logging.DEBUG)
# Create file handler to log all messages
fh = logging.FileHandler(self.options.tmpdir + '/test_framework.log')
fh.setLevel(logging.DEBUG)
        # Create console handler to log messages to stdout. By default this logs messages at INFO level and above, but the level can be configured with --loglevel.
ch = logging.StreamHandler(sys.stdout)
# User can provide log level as a number or string (eg DEBUG). loglevel was caught as a string, so try to convert it to an int
ll = int(self.options.loglevel) if self.options.loglevel.isdigit() else self.options.loglevel.upper()
ch.setLevel(ll)
# Format logs the same as bitcoind's debug.log with microprecision (so log files can be concatenated and sorted)
formatter = logging.Formatter(fmt='%(asctime)s.%(msecs)03d000 %(name)s (%(levelname)s): %(message)s', datefmt='%Y-%m-%d %H:%M:%S')
formatter.converter = time.gmtime
fh.setFormatter(formatter)
ch.setFormatter(formatter)
# add the handlers to the logger
self.log.addHandler(fh)
self.log.addHandler(ch)
if self.options.trace_rpc:
rpc_logger = logging.getLogger("BitcoinRPC")
rpc_logger.setLevel(logging.DEBUG)
rpc_handler = logging.StreamHandler(sys.stdout)
rpc_handler.setLevel(logging.DEBUG)
rpc_logger.addHandler(rpc_handler)
def _initialize_chain(self, test_dir, num_nodes, cachedir):
"""Initialize a pre-mined blockchain for use by the test.
Create a cache of a 200-block-long chain (with wallet) for MAX_NODES
Afterward, create num_nodes copies from the cache."""
assert num_nodes <= MAX_NODES
create_cache = False
for i in range(MAX_NODES):
if not os.path.isdir(os.path.join(cachedir, 'node' + str(i))):
create_cache = True
break
if create_cache:
self.log.debug("Creating data directories from cached datadir")
# find and delete old cache directories if any exist
for i in range(MAX_NODES):
if os.path.isdir(os.path.join(cachedir, "node" + str(i))):
shutil.rmtree(os.path.join(cachedir, "node" + str(i)))
# Create cache directories, run bitcoinds:
for i in range(MAX_NODES):
datadir = initialize_datadir(cachedir, i)
args = [os.getenv("BITCOIND", "bgoldd"), "-server", "-keypool=1", "-datadir=" + datadir, "-discover=0"]
if i > 0:
args.append("-connect=127.0.0.1:" + str(p2p_port(0)))
self.bitcoind_processes[i] = subprocess.Popen(args)
self.log.debug("initialize_chain: bitcoind started, waiting for RPC to come up")
self._wait_for_bitcoind_start(self.bitcoind_processes[i], datadir, i)
self.log.debug("initialize_chain: RPC successfully started")
self.nodes = []
for i in range(MAX_NODES):
try:
self.nodes.append(get_rpc_proxy(rpc_url(get_datadir_path(cachedir, i), i), i))
except:
self.log.exception("Error connecting to node %d" % i)
sys.exit(1)
# Create a 200-block-long chain; each of the 4 first nodes
# gets 25 mature blocks and 25 immature.
# Note: To preserve compatibility with older versions of
# initialize_chain, only 4 nodes will generate coins.
#
# blocks are created with timestamps 10 minutes apart
# starting from 2010 minutes in the past
self.enable_mocktime()
block_time = self.mocktime - (201 * 10 * 60)
for i in range(2):
for peer in range(4):
for j in range(25):
set_node_times(self.nodes, block_time)
self.nodes[peer].generate(1)
block_time += 10 * 60
# Must sync before next peer starts generating blocks
sync_blocks(self.nodes)
# Shut them down, and clean up cache directories:
self.stop_nodes()
self.nodes = []
self.disable_mocktime()
for i in range(MAX_NODES):
os.remove(log_filename(cachedir, i, "debug.log"))
os.remove(log_filename(cachedir, i, "db.log"))
os.remove(log_filename(cachedir, i, "peers.dat"))
os.remove(log_filename(cachedir, i, "fee_estimates.dat"))
for i in range(num_nodes):
from_dir = os.path.join(cachedir, "node" + str(i))
to_dir = os.path.join(test_dir, "node" + str(i))
shutil.copytree(from_dir, to_dir)
initialize_datadir(test_dir, i) # Overwrite port/rpcport in bitcoin.conf
def _initialize_chain_clean(self, test_dir, num_nodes):
"""Initialize empty blockchain for use by the test.
Create an empty blockchain and num_nodes wallets.
Useful if a test case wants complete control over initialization."""
for i in range(num_nodes):
initialize_datadir(test_dir, i)
def _wait_for_bitcoind_start(self, process, datadir, i, rpchost=None):
"""Wait for bitcoind to start.
This means that RPC is accessible and fully initialized.
Raise an exception if bitcoind exits during initialization."""
while True:
if process.poll() is not None:
raise Exception('bitcoind exited with status %i during initialization' % process.returncode)
try:
                # This fails until the node has created its .cookie file (see the ValueError handler below)
rpc = get_rpc_proxy(rpc_url(datadir, i, rpchost), i, coveragedir=self.options.coveragedir)
rpc.getblockcount()
break # break out of loop on success
except IOError as e:
if e.errno != errno.ECONNREFUSED: # Port not yet open?
raise # unknown IO error
except JSONRPCException as e: # Initialization phase
if e.error['code'] != -28: # RPC in warmup?
raise # unknown JSON RPC exception
            except ValueError as e:  # cookie file not found and no rpcuser or rpcpassword; bitcoind still starting
if "No RPC credentials" not in str(e):
raise
time.sleep(0.25)
class ComparisonTestFramework(BitcoinTestFramework):
"""Test framework for doing p2p comparison testing
Sets up some bitcoind binaries:
- 1 binary: test binary
- 2 binaries: 1 test binary, 1 ref binary
- n>2 binaries: 1 test binary, n-1 ref binaries"""
def __init__(self):
super().__init__()
self.num_nodes = 2
self.setup_clean_chain = True
def add_options(self, parser):
parser.add_option("--testbinary", dest="testbinary",
default=os.getenv("BITCOIND", "bgoldd"),
help="bitcoind binary to test")
parser.add_option("--refbinary", dest="refbinary",
default=os.getenv("BITCOIND", "bgoldd"),
help="bitcoind binary to use for reference nodes (if any)")
def setup_network(self):
extra_args = [['-whitelist=127.0.0.1']]*self.num_nodes
if hasattr(self, "extra_args"):
extra_args = self.extra_args
self.nodes = self.start_nodes(
self.num_nodes, self.options.tmpdir, extra_args,
binary=[self.options.testbinary] +
[self.options.refbinary] * (self.num_nodes - 1))
class SkipTest(Exception):
"""This exception is raised to skip a test"""
def __init__(self, message):
self.message = message
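# Editor's sketch (not part of the framework): a minimal subclass wiring up
# the override points listed in the BitcoinTestFramework docstring above.
#
#   class ExampleTest(BitcoinTestFramework):
#       def __init__(self):
#           super().__init__()
#           self.num_nodes = 2
#           self.setup_clean_chain = True
#
#       def run_test(self):
#           self.nodes[0].generate(10)
#           self.sync_all()
#           assert_equal(self.nodes[1].getblockcount(), 10)
#
#   if __name__ == '__main__':
#       ExampleTest().main()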
|
spiritlinxl/BTCGPU
|
test/functional/test_framework/test_framework.py
|
Python
|
mit
| 21,499
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# tritonschedule documentation build configuration file, created by
# sphinx-quickstart on Wed Jun 22 11:40:06 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath('../..'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.todo',
'sphinx.ext.viewcode',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'tritonschedule'
copyright = '2016, tritonschedule'
author = 'tritonschedule'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = ''
# The full version, including alpha/beta/rc tags.
release = ''
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = 'en'
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#
# today = ''
#
# Else, today_fmt is used as the format for a strftime call.
#
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# These patterns also affect html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
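# For example (illustrative values only), the 'alabaster' theme selected
# above accepts options such as:
#
# html_theme_options = {
#     'description': 'Short project tagline',
#     'fixed_sidebar': True,
# }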
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents.
# "<project> v<release> documentation" by default.
#
# html_title = 'tritonschedule v'
# A shorter title for the navigation bar. Default is the same as html_title.
#
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#
# html_logo = None
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#
# html_extra_path = []
# If not None, a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
# The empty string is equivalent to '%b %d, %Y'.
#
# html_last_updated_fmt = None
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#
# html_additional_pages = {}
# If false, no module index is generated.
#
# html_domain_indices = True
# If false, no index is generated.
#
# html_use_index = True
# If true, the index is split into individual pages for each letter.
#
# html_split_index = False
# If true, links to the reST sources are added to the pages.
#
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr', 'zh'
#
# html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# 'ja' uses this config value.
# 'zh' user can custom change `jieba` dictionary path.
#
# html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#
# html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'tritonscheduledoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'tritonschedule.tex', 'tritonschedule Documentation',
'tritonschedule', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#
# latex_use_parts = False
# If true, show page references after internal links.
#
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
#
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
#
# latex_appendices = []
# If false, no module index is generated.
#
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'tritonschedule', 'tritonschedule Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'tritonschedule', 'tritonschedule Documentation',
author, 'tritonschedule', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#
# texinfo_appendices = []
# If false, no module index is generated.
#
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#
# texinfo_no_detailmenu = False
# -- Options for Epub output ----------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
epub_author = author
epub_publisher = author
epub_copyright = copyright
# The basename for the epub file. It defaults to the project name.
# epub_basename = project
# The HTML theme for the epub output. Since the default themes are not
# optimized for small screen space, using the same theme for HTML and epub
# output is usually not wise. This defaults to 'epub', a theme designed to save
# visual space.
#
# epub_theme = 'epub'
# The language of the text. It defaults to the language option
# or 'en' if the language is not set.
#
# epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
# epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#
# epub_identifier = ''
# A unique identification for the text.
#
# epub_uid = ''
# A tuple containing the cover image and cover page html template filenames.
#
# epub_cover = ()
# A sequence of (type, uri, title) tuples for the guide element of content.opf.
#
# epub_guide = ()
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#
# epub_pre_files = []
# HTML files that should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#
# epub_post_files = []
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
# The depth of the table of contents in toc.ncx.
#
# epub_tocdepth = 3
# Allow duplicate toc entries.
#
# epub_tocdup = True
# Choose between 'default' and 'includehidden'.
#
# epub_tocscope = 'default'
# Fix unsupported image types using the Pillow.
#
# epub_fix_images = False
# Scale large images.
#
# epub_max_image_width = 0
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#
# epub_show_urls = 'inline'
# If false, no index is generated.
#
# epub_use_index = True
|
brianhang/tritonscheduler
|
docs/conf.py
|
Python
|
mit
| 11,786
|
# Copyright 2015 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import mock
from ._testing import _make_credentials
from google.api_core.exceptions import DeadlineExceeded
class Test___mutate_rows_request(unittest.TestCase):
def _call_fut(self, table_name, rows):
from google.cloud.bigtable.table import _mutate_rows_request
return _mutate_rows_request(table_name, rows)
@mock.patch("google.cloud.bigtable.table._MAX_BULK_MUTATIONS", new=3)
def test__mutate_rows_too_many_mutations(self):
from google.cloud.bigtable.row import DirectRow
from google.cloud.bigtable.table import TooManyMutationsError
table = mock.Mock(name="table", spec=["name"])
table.name = "table"
rows = [
DirectRow(row_key=b"row_key", table=table),
DirectRow(row_key=b"row_key_2", table=table),
]
rows[0].set_cell("cf1", b"c1", 1)
rows[0].set_cell("cf1", b"c1", 2)
rows[1].set_cell("cf1", b"c1", 3)
rows[1].set_cell("cf1", b"c1", 4)
with self.assertRaises(TooManyMutationsError):
self._call_fut("table", rows)
def test__mutate_rows_request(self):
from google.cloud.bigtable.row import DirectRow
table = mock.Mock(name="table", spec=["name"])
table.name = "table"
rows = [
DirectRow(row_key=b"row_key", table=table),
DirectRow(row_key=b"row_key_2"),
]
rows[0].set_cell("cf1", b"c1", b"1")
rows[1].set_cell("cf1", b"c1", b"2")
result = self._call_fut("table", rows)
expected_result = _mutate_rows_request_pb(table_name="table")
entry1 = expected_result.entries.add()
entry1.row_key = b"row_key"
mutations1 = entry1.mutations.add()
mutations1.set_cell.family_name = "cf1"
mutations1.set_cell.column_qualifier = b"c1"
mutations1.set_cell.timestamp_micros = -1
mutations1.set_cell.value = b"1"
entry2 = expected_result.entries.add()
entry2.row_key = b"row_key_2"
mutations2 = entry2.mutations.add()
mutations2.set_cell.family_name = "cf1"
mutations2.set_cell.column_qualifier = b"c1"
mutations2.set_cell.timestamp_micros = -1
mutations2.set_cell.value = b"2"
self.assertEqual(result, expected_result)
class Test__check_row_table_name(unittest.TestCase):
def _call_fut(self, table_name, row):
from google.cloud.bigtable.table import _check_row_table_name
return _check_row_table_name(table_name, row)
def test_wrong_table_name(self):
from google.cloud.bigtable.table import TableMismatchError
from google.cloud.bigtable.row import DirectRow
table = mock.Mock(name="table", spec=["name"])
table.name = "table"
row = DirectRow(row_key=b"row_key", table=table)
with self.assertRaises(TableMismatchError):
self._call_fut("other_table", row)
def test_right_table_name(self):
from google.cloud.bigtable.row import DirectRow
table = mock.Mock(name="table", spec=["name"])
table.name = "table"
row = DirectRow(row_key=b"row_key", table=table)
result = self._call_fut("table", row)
self.assertFalse(result)
class Test__check_row_type(unittest.TestCase):
def _call_fut(self, row):
from google.cloud.bigtable.table import _check_row_type
return _check_row_type(row)
    def test_wrong_row_type(self):
from google.cloud.bigtable.row import ConditionalRow
row = ConditionalRow(row_key=b"row_key", table="table", filter_=None)
with self.assertRaises(TypeError):
self._call_fut(row)
def test_right_row_type(self):
from google.cloud.bigtable.row import DirectRow
row = DirectRow(row_key=b"row_key", table="table")
result = self._call_fut(row)
self.assertFalse(result)
class TestTable(unittest.TestCase):
PROJECT_ID = "project-id"
INSTANCE_ID = "instance-id"
INSTANCE_NAME = "projects/" + PROJECT_ID + "/instances/" + INSTANCE_ID
TABLE_ID = "table-id"
TABLE_NAME = INSTANCE_NAME + "/tables/" + TABLE_ID
ROW_KEY = b"row-key"
ROW_KEY_1 = b"row-key-1"
ROW_KEY_2 = b"row-key-2"
ROW_KEY_3 = b"row-key-3"
FAMILY_NAME = u"family"
QUALIFIER = b"qualifier"
TIMESTAMP_MICROS = 100
VALUE = b"value"
_json_tests = None
@staticmethod
def _get_target_class():
from google.cloud.bigtable.table import Table
return Table
def _make_one(self, *args, **kwargs):
return self._get_target_class()(*args, **kwargs)
@staticmethod
def _get_target_client_class():
from google.cloud.bigtable.client import Client
return Client
def _make_client(self, *args, **kwargs):
return self._get_target_client_class()(*args, **kwargs)
def test_constructor_w_admin(self):
credentials = _make_credentials()
client = self._make_client(
project=self.PROJECT_ID, credentials=credentials, admin=True
)
instance = client.instance(instance_id=self.INSTANCE_ID)
table = self._make_one(self.TABLE_ID, instance)
self.assertEqual(table.table_id, self.TABLE_ID)
self.assertIs(table._instance._client, client)
self.assertEqual(table.name, self.TABLE_NAME)
def test_constructor_wo_admin(self):
credentials = _make_credentials()
client = self._make_client(
project=self.PROJECT_ID, credentials=credentials, admin=False
)
instance = client.instance(instance_id=self.INSTANCE_ID)
table = self._make_one(self.TABLE_ID, instance)
self.assertEqual(table.table_id, self.TABLE_ID)
self.assertIs(table._instance._client, client)
self.assertEqual(table.name, self.TABLE_NAME)
def _row_methods_helper(self):
client = self._make_client(
project="project-id", credentials=_make_credentials(), admin=True
)
instance = client.instance(instance_id=self.INSTANCE_ID)
table = self._make_one(self.TABLE_ID, instance)
row_key = b"row_key"
return table, row_key
def test_row_factory_direct(self):
from google.cloud.bigtable.row import DirectRow
table, row_key = self._row_methods_helper()
row = table.row(row_key)
self.assertIsInstance(row, DirectRow)
self.assertEqual(row._row_key, row_key)
self.assertEqual(row._table, table)
def test_row_factory_conditional(self):
from google.cloud.bigtable.row import ConditionalRow
table, row_key = self._row_methods_helper()
filter_ = object()
row = table.row(row_key, filter_=filter_)
self.assertIsInstance(row, ConditionalRow)
self.assertEqual(row._row_key, row_key)
self.assertEqual(row._table, table)
def test_row_factory_append(self):
from google.cloud.bigtable.row import AppendRow
table, row_key = self._row_methods_helper()
row = table.row(row_key, append=True)
self.assertIsInstance(row, AppendRow)
self.assertEqual(row._row_key, row_key)
self.assertEqual(row._table, table)
def test_direct_row(self):
from google.cloud.bigtable.row import DirectRow
table, row_key = self._row_methods_helper()
row = table.direct_row(row_key)
self.assertIsInstance(row, DirectRow)
self.assertEqual(row._row_key, row_key)
self.assertEqual(row._table, table)
def test_conditional_row(self):
from google.cloud.bigtable.row import ConditionalRow
table, row_key = self._row_methods_helper()
filter_ = object()
row = table.conditional_row(row_key, filter_=filter_)
self.assertIsInstance(row, ConditionalRow)
self.assertEqual(row._row_key, row_key)
self.assertEqual(row._table, table)
def test_append_row(self):
from google.cloud.bigtable.row import AppendRow
table, row_key = self._row_methods_helper()
row = table.append_row(row_key)
self.assertIsInstance(row, AppendRow)
self.assertEqual(row._row_key, row_key)
self.assertEqual(row._table, table)
def test_row_factory_failure(self):
table, row_key = self._row_methods_helper()
with self.assertRaises(ValueError):
table.row(row_key, filter_=object(), append=True)
def test___eq__(self):
credentials = _make_credentials()
client = self._make_client(
project="project-id", credentials=credentials, admin=True
)
instance = client.instance(instance_id=self.INSTANCE_ID)
table1 = self._make_one(self.TABLE_ID, instance)
table2 = self._make_one(self.TABLE_ID, instance)
self.assertEqual(table1, table2)
def test___eq__type_differ(self):
credentials = _make_credentials()
client = self._make_client(
project="project-id", credentials=credentials, admin=True
)
instance = client.instance(instance_id=self.INSTANCE_ID)
table1 = self._make_one(self.TABLE_ID, instance)
table2 = object()
self.assertNotEqual(table1, table2)
def test___ne__same_value(self):
credentials = _make_credentials()
client = self._make_client(
project="project-id", credentials=credentials, admin=True
)
instance = client.instance(instance_id=self.INSTANCE_ID)
table1 = self._make_one(self.TABLE_ID, instance)
table2 = self._make_one(self.TABLE_ID, instance)
comparison_val = table1 != table2
self.assertFalse(comparison_val)
def test___ne__(self):
table1 = self._make_one("table_id1", None)
table2 = self._make_one("table_id2", None)
self.assertNotEqual(table1, table2)
def _create_test_helper(self, split_keys=[], column_families={}):
from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client
from google.cloud.bigtable_admin_v2.proto import table_pb2
from google.cloud.bigtable_admin_v2.proto import (
bigtable_table_admin_pb2 as table_admin_messages_v2_pb2,
)
from google.cloud.bigtable.column_family import ColumnFamily
table_api = mock.create_autospec(
bigtable_table_admin_client.BigtableTableAdminClient
)
credentials = _make_credentials()
client = self._make_client(
project="project-id", credentials=credentials, admin=True
)
instance = client.instance(instance_id=self.INSTANCE_ID)
table = self._make_one(self.TABLE_ID, instance)
# Patch API calls
client._table_admin_client = table_api
# Perform the method and check the result.
table.create(column_families=column_families, initial_split_keys=split_keys)
families = {
id: ColumnFamily(id, self, rule).to_pb()
for (id, rule) in column_families.items()
}
split = table_admin_messages_v2_pb2.CreateTableRequest.Split
splits = [split(key=split_key) for split_key in split_keys]
table_api.create_table.assert_called_once_with(
parent=self.INSTANCE_NAME,
table=table_pb2.Table(column_families=families),
table_id=self.TABLE_ID,
initial_splits=splits,
)
def test_create(self):
self._create_test_helper()
def test_create_with_families(self):
from google.cloud.bigtable.column_family import MaxVersionsGCRule
families = {"family": MaxVersionsGCRule(5)}
self._create_test_helper(column_families=families)
def test_create_with_split_keys(self):
self._create_test_helper(split_keys=[b"split1", b"split2", b"split3"])
def test_exists(self):
from google.cloud.bigtable_admin_v2.proto import table_pb2 as table_data_v2_pb2
from google.cloud.bigtable_admin_v2.proto import (
bigtable_table_admin_pb2 as table_messages_v1_pb2,
)
from google.cloud.bigtable_admin_v2.gapic import (
bigtable_instance_admin_client,
bigtable_table_admin_client,
)
from google.api_core.exceptions import NotFound
from google.api_core.exceptions import BadRequest
table_api = bigtable_table_admin_client.BigtableTableAdminClient(mock.Mock())
instance_api = bigtable_instance_admin_client.BigtableInstanceAdminClient(
mock.Mock()
)
credentials = _make_credentials()
client = self._make_client(
project="project-id", credentials=credentials, admin=True
)
instance = client.instance(instance_id=self.INSTANCE_ID)
# Create response_pb
response_pb = table_messages_v1_pb2.ListTablesResponse(
tables=[table_data_v2_pb2.Table(name=self.TABLE_NAME)]
)
# Patch API calls
client._table_admin_client = table_api
client._instance_admin_client = instance_api
bigtable_table_stub = client._table_admin_client.transport
bigtable_table_stub.get_table.side_effect = [
response_pb,
NotFound("testing"),
BadRequest("testing"),
]
# Perform the method and check the result.
table1 = instance.table(self.TABLE_ID)
table2 = instance.table("table-id2")
result = table1.exists()
self.assertEqual(True, result)
result = table2.exists()
self.assertEqual(False, result)
with self.assertRaises(BadRequest):
table2.exists()
def test_delete(self):
from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client
table_api = mock.create_autospec(
bigtable_table_admin_client.BigtableTableAdminClient
)
credentials = _make_credentials()
client = self._make_client(
project="project-id", credentials=credentials, admin=True
)
instance = client.instance(instance_id=self.INSTANCE_ID)
table = self._make_one(self.TABLE_ID, instance)
# Patch API calls
client._table_admin_client = table_api
# Create expected_result.
expected_result = None # delete() has no return value.
# Perform the method and check the result.
result = table.delete()
self.assertEqual(result, expected_result)
def _list_column_families_helper(self):
from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client
table_api = bigtable_table_admin_client.BigtableTableAdminClient(mock.Mock())
credentials = _make_credentials()
client = self._make_client(
project="project-id", credentials=credentials, admin=True
)
instance = client.instance(instance_id=self.INSTANCE_ID)
table = self._make_one(self.TABLE_ID, instance)
# Create response_pb
COLUMN_FAMILY_ID = "foo"
column_family = _ColumnFamilyPB()
response_pb = _TablePB(column_families={COLUMN_FAMILY_ID: column_family})
# Patch the stub used by the API method.
client._table_admin_client = table_api
bigtable_table_stub = client._table_admin_client.transport
bigtable_table_stub.get_table.side_effect = [response_pb]
# Create expected_result.
expected_result = {COLUMN_FAMILY_ID: table.column_family(COLUMN_FAMILY_ID)}
# Perform the method and check the result.
result = table.list_column_families()
self.assertEqual(result, expected_result)
def test_list_column_families(self):
self._list_column_families_helper()
def test_get_cluster_states(self):
from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client
from google.cloud.bigtable.enums import Table as enum_table
from google.cloud.bigtable.table import ClusterState
INITIALIZING = enum_table.ReplicationState.INITIALIZING
PLANNED_MAINTENANCE = enum_table.ReplicationState.PLANNED_MAINTENANCE
READY = enum_table.ReplicationState.READY
table_api = bigtable_table_admin_client.BigtableTableAdminClient(mock.Mock())
credentials = _make_credentials()
client = self._make_client(
project="project-id", credentials=credentials, admin=True
)
instance = client.instance(instance_id=self.INSTANCE_ID)
table = self._make_one(self.TABLE_ID, instance)
response_pb = _TablePB(
cluster_states={
"cluster-id1": _ClusterStatePB(INITIALIZING),
"cluster-id2": _ClusterStatePB(PLANNED_MAINTENANCE),
"cluster-id3": _ClusterStatePB(READY),
}
)
# Patch the stub used by the API method.
client._table_admin_client = table_api
bigtable_table_stub = client._table_admin_client.transport
bigtable_table_stub.get_table.side_effect = [response_pb]
# build expected result
expected_result = {
u"cluster-id1": ClusterState(INITIALIZING),
u"cluster-id2": ClusterState(PLANNED_MAINTENANCE),
u"cluster-id3": ClusterState(READY),
}
# Perform the method and check the result.
result = table.get_cluster_states()
self.assertEqual(result, expected_result)
def _read_row_helper(self, chunks, expected_result, app_profile_id=None):
from google.cloud._testing import _Monkey
from google.cloud.bigtable import table as MUT
from google.cloud.bigtable.row_set import RowSet
from google.cloud.bigtable_v2.gapic import bigtable_client
from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client
from google.cloud.bigtable.row_filters import RowSampleFilter
data_api = bigtable_client.BigtableClient(mock.Mock())
table_api = mock.create_autospec(
bigtable_table_admin_client.BigtableTableAdminClient
)
credentials = _make_credentials()
client = self._make_client(
project="project-id", credentials=credentials, admin=True
)
instance = client.instance(instance_id=self.INSTANCE_ID)
table = self._make_one(self.TABLE_ID, instance, app_profile_id=app_profile_id)
# Create request_pb
request_pb = object() # Returned by our mock.
mock_created = []
def mock_create_row_request(table_name, **kwargs):
mock_created.append((table_name, kwargs))
return request_pb
# Create response_iterator
if chunks is None:
response_iterator = iter(()) # no responses at all
else:
response_pb = _ReadRowsResponsePB(chunks=chunks)
response_iterator = iter([response_pb])
# Patch the stub used by the API method.
client._table_data_client = data_api
client._table_admin_client = table_api
client._table_data_client.transport.read_rows = mock.Mock(
side_effect=[response_iterator]
)
# Perform the method and check the result.
filter_obj = RowSampleFilter(0.33)
result = None
with _Monkey(MUT, _create_row_request=mock_create_row_request):
result = table.read_row(self.ROW_KEY, filter_=filter_obj)
row_set = RowSet()
row_set.add_row_key(self.ROW_KEY)
expected_request = [
(
table.name,
{
"end_inclusive": False,
"row_set": row_set,
"app_profile_id": app_profile_id,
"end_key": None,
"limit": None,
"start_key": None,
"filter_": filter_obj,
},
)
]
self.assertEqual(result, expected_result)
self.assertEqual(mock_created, expected_request)
def test_read_row_miss_no__responses(self):
self._read_row_helper(None, None)
def test_read_row_miss_no_chunks_in_response(self):
chunks = []
self._read_row_helper(chunks, None)
def test_read_row_complete(self):
from google.cloud.bigtable.row_data import Cell
from google.cloud.bigtable.row_data import PartialRowData
app_profile_id = "app-profile-id"
chunk = _ReadRowsResponseCellChunkPB(
row_key=self.ROW_KEY,
family_name=self.FAMILY_NAME,
qualifier=self.QUALIFIER,
timestamp_micros=self.TIMESTAMP_MICROS,
value=self.VALUE,
commit_row=True,
)
chunks = [chunk]
expected_result = PartialRowData(row_key=self.ROW_KEY)
family = expected_result._cells.setdefault(self.FAMILY_NAME, {})
column = family.setdefault(self.QUALIFIER, [])
column.append(Cell.from_pb(chunk))
self._read_row_helper(chunks, expected_result, app_profile_id)
def test_read_row_more_than_one_row_returned(self):
app_profile_id = "app-profile-id"
chunk_1 = _ReadRowsResponseCellChunkPB(
row_key=self.ROW_KEY,
family_name=self.FAMILY_NAME,
qualifier=self.QUALIFIER,
timestamp_micros=self.TIMESTAMP_MICROS,
value=self.VALUE,
commit_row=True,
)
chunk_2 = _ReadRowsResponseCellChunkPB(
row_key=self.ROW_KEY_2,
family_name=self.FAMILY_NAME,
qualifier=self.QUALIFIER,
timestamp_micros=self.TIMESTAMP_MICROS,
value=self.VALUE,
commit_row=True,
)
chunks = [chunk_1, chunk_2]
with self.assertRaises(ValueError):
self._read_row_helper(chunks, None, app_profile_id)
def test_read_row_still_partial(self):
chunk = _ReadRowsResponseCellChunkPB(
row_key=self.ROW_KEY,
family_name=self.FAMILY_NAME,
qualifier=self.QUALIFIER,
timestamp_micros=self.TIMESTAMP_MICROS,
value=self.VALUE,
)
# No "commit row".
chunks = [chunk]
with self.assertRaises(ValueError):
self._read_row_helper(chunks, None)
def test_mutate_rows(self):
from google.rpc.status_pb2 import Status
from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client
table_api = mock.create_autospec(
bigtable_table_admin_client.BigtableTableAdminClient
)
credentials = _make_credentials()
client = self._make_client(
project="project-id", credentials=credentials, admin=True
)
instance = client.instance(instance_id=self.INSTANCE_ID)
client._table_admin_client = table_api
table = self._make_one(self.TABLE_ID, instance)
response = [Status(code=0), Status(code=1)]
mock_worker = mock.Mock(return_value=response)
with mock.patch(
"google.cloud.bigtable.table._RetryableMutateRowsWorker",
new=mock.MagicMock(return_value=mock_worker),
):
statuses = table.mutate_rows([mock.MagicMock(), mock.MagicMock()])
result = [status.code for status in statuses]
expected_result = [0, 1]
self.assertEqual(result, expected_result)
def test_read_rows(self):
from google.cloud._testing import _Monkey
from google.cloud.bigtable.row_data import PartialRowsData
from google.cloud.bigtable import table as MUT
from google.cloud.bigtable_v2.gapic import bigtable_client
from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client
data_api = bigtable_client.BigtableClient(mock.Mock())
table_api = mock.create_autospec(
bigtable_table_admin_client.BigtableTableAdminClient
)
credentials = _make_credentials()
client = self._make_client(
project="project-id", credentials=credentials, admin=True
)
client._table_data_client = data_api
client._table_admin_client = table_api
instance = client.instance(instance_id=self.INSTANCE_ID)
app_profile_id = "app-profile-id"
table = self._make_one(self.TABLE_ID, instance, app_profile_id=app_profile_id)
# Create request_pb
request = retry = object() # Returned by our mock.
mock_created = []
def mock_create_row_request(table_name, **kwargs):
mock_created.append((table_name, kwargs))
return request
# Create expected_result.
expected_result = PartialRowsData(
client._table_data_client.transport.read_rows, request, retry
)
# Perform the method and check the result.
start_key = b"start-key"
end_key = b"end-key"
filter_obj = object()
limit = 22
with _Monkey(MUT, _create_row_request=mock_create_row_request):
result = table.read_rows(
start_key=start_key,
end_key=end_key,
filter_=filter_obj,
limit=limit,
retry=retry,
)
self.assertEqual(result.rows, expected_result.rows)
self.assertEqual(result.retry, expected_result.retry)
created_kwargs = {
"start_key": start_key,
"end_key": end_key,
"filter_": filter_obj,
"limit": limit,
"end_inclusive": False,
"app_profile_id": app_profile_id,
"row_set": None,
}
self.assertEqual(mock_created, [(table.name, created_kwargs)])
def test_read_retry_rows(self):
from google.cloud.bigtable_v2.gapic import bigtable_client
from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client
from google.api_core import retry
data_api = bigtable_client.BigtableClient(mock.Mock())
table_api = bigtable_table_admin_client.BigtableTableAdminClient(mock.Mock())
credentials = _make_credentials()
client = self._make_client(
project="project-id", credentials=credentials, admin=True
)
client._table_data_client = data_api
client._table_admin_client = table_api
instance = client.instance(instance_id=self.INSTANCE_ID)
table = self._make_one(self.TABLE_ID, instance)
retry_read_rows = retry.Retry(predicate=_read_rows_retry_exception)
# Create response_iterator
chunk_1 = _ReadRowsResponseCellChunkPB(
row_key=self.ROW_KEY_1,
family_name=self.FAMILY_NAME,
qualifier=self.QUALIFIER,
timestamp_micros=self.TIMESTAMP_MICROS,
value=self.VALUE,
commit_row=True,
)
chunk_2 = _ReadRowsResponseCellChunkPB(
row_key=self.ROW_KEY_2,
family_name=self.FAMILY_NAME,
qualifier=self.QUALIFIER,
timestamp_micros=self.TIMESTAMP_MICROS,
value=self.VALUE,
commit_row=True,
)
response_1 = _ReadRowsResponseV2([chunk_1])
response_2 = _ReadRowsResponseV2([chunk_2])
response_failure_iterator_1 = _MockFailureIterator_1()
response_failure_iterator_2 = _MockFailureIterator_2([response_1])
response_iterator = _MockReadRowsIterator(response_2)
# Patch the stub used by the API method.
client._table_data_client.transport.read_rows = mock.Mock(
side_effect=[
response_failure_iterator_1,
response_failure_iterator_2,
response_iterator,
]
)
rows = []
for row in table.read_rows(
start_key=self.ROW_KEY_1, end_key=self.ROW_KEY_2, retry=retry_read_rows
):
rows.append(row)
result = rows[1]
self.assertEqual(result.row_key, self.ROW_KEY_2)
def test_yield_retry_rows(self):
from google.cloud.bigtable_v2.gapic import bigtable_client
from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client
import warnings
data_api = bigtable_client.BigtableClient(mock.Mock())
table_api = bigtable_table_admin_client.BigtableTableAdminClient(mock.Mock())
credentials = _make_credentials()
client = self._make_client(
project="project-id", credentials=credentials, admin=True
)
client._table_data_client = data_api
client._table_admin_client = table_api
instance = client.instance(instance_id=self.INSTANCE_ID)
table = self._make_one(self.TABLE_ID, instance)
# Create response_iterator
chunk_1 = _ReadRowsResponseCellChunkPB(
row_key=self.ROW_KEY_1,
family_name=self.FAMILY_NAME,
qualifier=self.QUALIFIER,
timestamp_micros=self.TIMESTAMP_MICROS,
value=self.VALUE,
commit_row=True,
)
chunk_2 = _ReadRowsResponseCellChunkPB(
row_key=self.ROW_KEY_2,
family_name=self.FAMILY_NAME,
qualifier=self.QUALIFIER,
timestamp_micros=self.TIMESTAMP_MICROS,
value=self.VALUE,
commit_row=True,
)
response_1 = _ReadRowsResponseV2([chunk_1])
response_2 = _ReadRowsResponseV2([chunk_2])
response_failure_iterator_1 = _MockFailureIterator_1()
response_failure_iterator_2 = _MockFailureIterator_2([response_1])
response_iterator = _MockReadRowsIterator(response_2)
# Patch the stub used by the API method.
client._table_data_client.transport.read_rows = mock.Mock(
side_effect=[
response_failure_iterator_1,
response_failure_iterator_2,
response_iterator,
]
)
rows = []
with warnings.catch_warnings(record=True) as warned:
for row in table.yield_rows(
start_key=self.ROW_KEY_1, end_key=self.ROW_KEY_2
):
rows.append(row)
self.assertEqual(len(warned), 1)
self.assertIs(warned[0].category, DeprecationWarning)
result = rows[1]
self.assertEqual(result.row_key, self.ROW_KEY_2)
def test_yield_rows_with_row_set(self):
from google.cloud.bigtable_v2.gapic import bigtable_client
from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client
from google.cloud.bigtable.row_set import RowSet
from google.cloud.bigtable.row_set import RowRange
import warnings
data_api = bigtable_client.BigtableClient(mock.Mock())
table_api = bigtable_table_admin_client.BigtableTableAdminClient(mock.Mock())
credentials = _make_credentials()
client = self._make_client(
project="project-id", credentials=credentials, admin=True
)
client._table_data_client = data_api
client._table_admin_client = table_api
instance = client.instance(instance_id=self.INSTANCE_ID)
table = self._make_one(self.TABLE_ID, instance)
# Create response_iterator
chunk_1 = _ReadRowsResponseCellChunkPB(
row_key=self.ROW_KEY_1,
family_name=self.FAMILY_NAME,
qualifier=self.QUALIFIER,
timestamp_micros=self.TIMESTAMP_MICROS,
value=self.VALUE,
commit_row=True,
)
chunk_2 = _ReadRowsResponseCellChunkPB(
row_key=self.ROW_KEY_2,
family_name=self.FAMILY_NAME,
qualifier=self.QUALIFIER,
timestamp_micros=self.TIMESTAMP_MICROS,
value=self.VALUE,
commit_row=True,
)
chunk_3 = _ReadRowsResponseCellChunkPB(
row_key=self.ROW_KEY_3,
family_name=self.FAMILY_NAME,
qualifier=self.QUALIFIER,
timestamp_micros=self.TIMESTAMP_MICROS,
value=self.VALUE,
commit_row=True,
)
response_1 = _ReadRowsResponseV2([chunk_1])
response_2 = _ReadRowsResponseV2([chunk_2])
response_3 = _ReadRowsResponseV2([chunk_3])
response_iterator = _MockReadRowsIterator(response_1, response_2, response_3)
# Patch the stub used by the API method.
client._table_data_client.transport.read_rows = mock.Mock(
side_effect=[response_iterator]
)
rows = []
row_set = RowSet()
row_set.add_row_range(
RowRange(start_key=self.ROW_KEY_1, end_key=self.ROW_KEY_2)
)
row_set.add_row_key(self.ROW_KEY_3)
with warnings.catch_warnings(record=True) as warned:
for row in table.yield_rows(row_set=row_set):
rows.append(row)
self.assertEqual(len(warned), 1)
self.assertIs(warned[0].category, DeprecationWarning)
self.assertEqual(rows[0].row_key, self.ROW_KEY_1)
self.assertEqual(rows[1].row_key, self.ROW_KEY_2)
self.assertEqual(rows[2].row_key, self.ROW_KEY_3)
def test_sample_row_keys(self):
from google.cloud.bigtable_v2.gapic import bigtable_client
from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client
data_api = bigtable_client.BigtableClient(mock.Mock())
table_api = bigtable_table_admin_client.BigtableTableAdminClient(mock.Mock())
credentials = _make_credentials()
client = self._make_client(
project="project-id", credentials=credentials, admin=True
)
client._table_data_client = data_api
client._table_admin_client = table_api
instance = client.instance(instance_id=self.INSTANCE_ID)
table = self._make_one(self.TABLE_ID, instance)
# Create response_iterator
response_iterator = object() # Just passed to a mock.
# Patch the stub used by the API method.
inner_api_calls = client._table_data_client._inner_api_calls
inner_api_calls["sample_row_keys"] = mock.Mock(
side_effect=[[response_iterator]]
)
# Create expected_result.
expected_result = response_iterator
# Perform the method and check the result.
result = table.sample_row_keys()
self.assertEqual(result[0], expected_result)
def test_truncate(self):
from google.cloud.bigtable_v2.gapic import bigtable_client
from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client
data_api = mock.create_autospec(bigtable_client.BigtableClient)
table_api = mock.create_autospec(
bigtable_table_admin_client.BigtableTableAdminClient
)
credentials = _make_credentials()
client = self._make_client(
project="project-id", credentials=credentials, admin=True
)
client._table_data_client = data_api
client._table_admin_client = table_api
instance = client.instance(instance_id=self.INSTANCE_ID)
table = self._make_one(self.TABLE_ID, instance)
expected_result = None # truncate() has no return value.
with mock.patch("google.cloud.bigtable.table.Table.name", new=self.TABLE_NAME):
result = table.truncate()
table_api.drop_row_range.assert_called_once_with(
name=self.TABLE_NAME, delete_all_data_from_table=True
)
self.assertEqual(result, expected_result)
def test_truncate_w_timeout(self):
from google.cloud.bigtable_v2.gapic import bigtable_client
from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client
data_api = mock.create_autospec(bigtable_client.BigtableClient)
table_api = mock.create_autospec(
bigtable_table_admin_client.BigtableTableAdminClient
)
credentials = _make_credentials()
client = self._make_client(
project="project-id", credentials=credentials, admin=True
)
client._table_data_client = data_api
client._table_admin_client = table_api
instance = client.instance(instance_id=self.INSTANCE_ID)
table = self._make_one(self.TABLE_ID, instance)
expected_result = None # truncate() has no return value.
timeout = 120
result = table.truncate(timeout=timeout)
self.assertEqual(result, expected_result)
def test_drop_by_prefix(self):
from google.cloud.bigtable_v2.gapic import bigtable_client
from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client
data_api = mock.create_autospec(bigtable_client.BigtableClient)
table_api = mock.create_autospec(
bigtable_table_admin_client.BigtableTableAdminClient
)
credentials = _make_credentials()
client = self._make_client(
project="project-id", credentials=credentials, admin=True
)
client._table_data_client = data_api
client._table_admin_client = table_api
instance = client.instance(instance_id=self.INSTANCE_ID)
table = self._make_one(self.TABLE_ID, instance)
expected_result = None # drop_by_prefix() has no return value.
row_key_prefix = "row-key-prefix"
result = table.drop_by_prefix(row_key_prefix=row_key_prefix)
self.assertEqual(result, expected_result)
def test_drop_by_prefix_w_timeout(self):
from google.cloud.bigtable_v2.gapic import bigtable_client
from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client
data_api = mock.create_autospec(bigtable_client.BigtableClient)
table_api = mock.create_autospec(
bigtable_table_admin_client.BigtableTableAdminClient
)
credentials = _make_credentials()
client = self._make_client(
project="project-id", credentials=credentials, admin=True
)
client._table_data_client = data_api
client._table_admin_client = table_api
instance = client.instance(instance_id=self.INSTANCE_ID)
table = self._make_one(self.TABLE_ID, instance)
expected_result = None # drop_by_prefix() has no return value.
row_key_prefix = "row-key-prefix"
timeout = 120
result = table.drop_by_prefix(row_key_prefix=row_key_prefix, timeout=timeout)
self.assertEqual(result, expected_result)
def test_mutations_batcher_factory(self):
flush_count = 100
max_row_bytes = 1000
table = self._make_one(self.TABLE_ID, None)
mutation_batcher = table.mutations_batcher(
flush_count=flush_count, max_row_bytes=max_row_bytes
)
self.assertEqual(mutation_batcher.table.table_id, self.TABLE_ID)
self.assertEqual(mutation_batcher.flush_count, flush_count)
self.assertEqual(mutation_batcher.max_row_bytes, max_row_bytes)
class Test__RetryableMutateRowsWorker(unittest.TestCase):
from grpc import StatusCode
PROJECT_ID = "project-id"
INSTANCE_ID = "instance-id"
INSTANCE_NAME = "projects/" + PROJECT_ID + "/instances/" + INSTANCE_ID
TABLE_ID = "table-id"
# RPC Status Codes
SUCCESS = StatusCode.OK.value[0]
RETRYABLE_1 = StatusCode.DEADLINE_EXCEEDED.value[0]
RETRYABLE_2 = StatusCode.ABORTED.value[0]
NON_RETRYABLE = StatusCode.CANCELLED.value[0]
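    # Note: grpc.StatusCode members are (int, str) tuples, so ``.value[0]``
    # extracts the integer code (OK == 0, CANCELLED == 1,
    # DEADLINE_EXCEEDED == 4, ABORTED == 10).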
@staticmethod
def _get_target_class_for_worker():
from google.cloud.bigtable.table import _RetryableMutateRowsWorker
return _RetryableMutateRowsWorker
def _make_worker(self, *args, **kwargs):
return self._get_target_class_for_worker()(*args, **kwargs)
@staticmethod
def _get_target_class_for_table():
from google.cloud.bigtable.table import Table
return Table
def _make_table(self, *args, **kwargs):
return self._get_target_class_for_table()(*args, **kwargs)
@staticmethod
def _get_target_client_class():
from google.cloud.bigtable.client import Client
return Client
def _make_client(self, *args, **kwargs):
return self._get_target_client_class()(*args, **kwargs)
def _make_responses_statuses(self, codes):
from google.rpc.status_pb2 import Status
response = [Status(code=code) for code in codes]
return response
def _make_responses(self, codes):
import six
from google.cloud.bigtable_v2.proto.bigtable_pb2 import MutateRowsResponse
from google.rpc.status_pb2 import Status
entries = [
MutateRowsResponse.Entry(index=i, status=Status(code=codes[i]))
for i in six.moves.xrange(len(codes))
]
return MutateRowsResponse(entries=entries)
def test_callable_empty_rows(self):
from google.cloud.bigtable_v2.gapic import bigtable_client
from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client
data_api = mock.create_autospec(bigtable_client.BigtableClient)
table_api = mock.create_autospec(
bigtable_table_admin_client.BigtableTableAdminClient
)
credentials = _make_credentials()
client = self._make_client(
project="project-id", credentials=credentials, admin=True
)
client._table_data_client = data_api
client._table_admin_client = table_api
instance = client.instance(instance_id=self.INSTANCE_ID)
table = self._make_table(self.TABLE_ID, instance)
worker = self._make_worker(client, table.name, [])
statuses = worker()
self.assertEqual(len(statuses), 0)
def test_callable_no_retry_strategy(self):
from google.cloud.bigtable.row import DirectRow
from google.cloud.bigtable_v2.gapic import bigtable_client
from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client
# Setup:
# - Mutate 3 rows.
# Action:
# - Attempt to mutate the rows w/o any retry strategy.
# Expectation:
# - Since no retry, should return statuses as they come back.
# - Even if there are retryable errors, no retry attempt is made.
# - State of responses_statuses should be
# [success, retryable, non-retryable]
data_api = bigtable_client.BigtableClient(mock.Mock())
table_api = bigtable_table_admin_client.BigtableTableAdminClient(mock.Mock())
credentials = _make_credentials()
client = self._make_client(
project="project-id", credentials=credentials, admin=True
)
client._table_data_client = data_api
client._table_admin_client = table_api
instance = client.instance(instance_id=self.INSTANCE_ID)
table = self._make_table(self.TABLE_ID, instance)
row_1 = DirectRow(row_key=b"row_key", table=table)
row_1.set_cell("cf", b"col", b"value1")
row_2 = DirectRow(row_key=b"row_key_2", table=table)
row_2.set_cell("cf", b"col", b"value2")
row_3 = DirectRow(row_key=b"row_key_3", table=table)
row_3.set_cell("cf", b"col", b"value3")
response = self._make_responses(
[self.SUCCESS, self.RETRYABLE_1, self.NON_RETRYABLE]
)
with mock.patch("google.cloud.bigtable.table.wrap_method") as patched:
patched.return_value = mock.Mock(return_value=[response])
worker = self._make_worker(client, table.name, [row_1, row_2, row_3])
statuses = worker(retry=None)
result = [status.code for status in statuses]
expected_result = [self.SUCCESS, self.RETRYABLE_1, self.NON_RETRYABLE]
client._table_data_client._inner_api_calls["mutate_rows"].assert_called_once()
self.assertEqual(result, expected_result)
def test_callable_retry(self):
from google.cloud.bigtable.row import DirectRow
from google.cloud.bigtable.table import DEFAULT_RETRY
from google.cloud.bigtable_v2.gapic import bigtable_client
from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client
# Setup:
# - Mutate 3 rows.
# Action:
# - Initial attempt will mutate all 3 rows.
# Expectation:
# - First attempt will result in one retryable error.
# - Second attempt will result in success for the retry-ed row.
# - Check MutateRows is called twice.
# - State of responses_statuses should be
# [success, success, non-retryable]
data_api = bigtable_client.BigtableClient(mock.Mock())
table_api = bigtable_table_admin_client.BigtableTableAdminClient(mock.Mock())
credentials = _make_credentials()
client = self._make_client(
project="project-id", credentials=credentials, admin=True
)
client._table_data_client = data_api
client._table_admin_client = table_api
instance = client.instance(instance_id=self.INSTANCE_ID)
table = self._make_table(self.TABLE_ID, instance)
row_1 = DirectRow(row_key=b"row_key", table=table)
row_1.set_cell("cf", b"col", b"value1")
row_2 = DirectRow(row_key=b"row_key_2", table=table)
row_2.set_cell("cf", b"col", b"value2")
row_3 = DirectRow(row_key=b"row_key_3", table=table)
row_3.set_cell("cf", b"col", b"value3")
response_1 = self._make_responses(
[self.SUCCESS, self.RETRYABLE_1, self.NON_RETRYABLE]
)
response_2 = self._make_responses([self.SUCCESS])
# Patch the stub used by the API method.
client._table_data_client._inner_api_calls["mutate_rows"] = mock.Mock(
side_effect=[[response_1], [response_2]]
)
retry = DEFAULT_RETRY.with_delay(initial=0.1)
worker = self._make_worker(client, table.name, [row_1, row_2, row_3])
statuses = worker(retry=retry)
result = [status.code for status in statuses]
expected_result = [self.SUCCESS, self.SUCCESS, self.NON_RETRYABLE]
self.assertEqual(
client._table_data_client._inner_api_calls["mutate_rows"].call_count, 2
)
self.assertEqual(result, expected_result)
def test_do_mutate_retryable_rows_empty_rows(self):
from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client
table_api = mock.create_autospec(
bigtable_table_admin_client.BigtableTableAdminClient
)
credentials = _make_credentials()
client = self._make_client(
project="project-id", credentials=credentials, admin=True
)
client._table_admin_client = table_api
instance = client.instance(instance_id=self.INSTANCE_ID)
table = self._make_table(self.TABLE_ID, instance)
worker = self._make_worker(client, table.name, [])
statuses = worker._do_mutate_retryable_rows()
self.assertEqual(len(statuses), 0)
def test_do_mutate_retryable_rows(self):
from google.cloud.bigtable.row import DirectRow
from google.cloud.bigtable_v2.gapic import bigtable_client
from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client
# Setup:
# - Mutate 2 rows.
# Action:
# - Initial attempt will mutate all 2 rows.
# Expectation:
# - Expect [success, non-retryable]
data_api = bigtable_client.BigtableClient(mock.Mock())
table_api = bigtable_table_admin_client.BigtableTableAdminClient(mock.Mock())
credentials = _make_credentials()
client = self._make_client(
project="project-id", credentials=credentials, admin=True
)
client._table_data_client = data_api
client._table_admin_client = table_api
instance = client.instance(instance_id=self.INSTANCE_ID)
table = self._make_table(self.TABLE_ID, instance)
row_1 = DirectRow(row_key=b"row_key", table=table)
row_1.set_cell("cf", b"col", b"value1")
row_2 = DirectRow(row_key=b"row_key_2", table=table)
row_2.set_cell("cf", b"col", b"value2")
response = self._make_responses([self.SUCCESS, self.NON_RETRYABLE])
# Patch the stub used by the API method.
inner_api_calls = client._table_data_client._inner_api_calls
inner_api_calls["mutate_rows"] = mock.Mock(side_effect=[[response]])
worker = self._make_worker(client, table.name, [row_1, row_2])
statuses = worker._do_mutate_retryable_rows()
result = [status.code for status in statuses]
expected_result = [self.SUCCESS, self.NON_RETRYABLE]
self.assertEqual(result, expected_result)
def test_do_mutate_retryable_rows_retry(self):
from google.cloud.bigtable.row import DirectRow
from google.cloud.bigtable.table import _BigtableRetryableError
from google.cloud.bigtable_v2.gapic import bigtable_client
from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client
# Setup:
# - Mutate 3 rows.
# Action:
# - Initial attempt will mutate all 3 rows.
# Expectation:
# - Second row returns retryable error code, so expect a raise.
# - State of responses_statuses should be
# [success, retryable, non-retryable]
data_api = bigtable_client.BigtableClient(mock.Mock())
table_api = bigtable_table_admin_client.BigtableTableAdminClient(mock.Mock())
credentials = _make_credentials()
client = self._make_client(
project="project-id", credentials=credentials, admin=True
)
client._table_data_client = data_api
client._table_admin_client = table_api
instance = client.instance(instance_id=self.INSTANCE_ID)
table = self._make_table(self.TABLE_ID, instance)
row_1 = DirectRow(row_key=b"row_key", table=table)
row_1.set_cell("cf", b"col", b"value1")
row_2 = DirectRow(row_key=b"row_key_2", table=table)
row_2.set_cell("cf", b"col", b"value2")
row_3 = DirectRow(row_key=b"row_key_3", table=table)
row_3.set_cell("cf", b"col", b"value3")
response = self._make_responses(
[self.SUCCESS, self.RETRYABLE_1, self.NON_RETRYABLE]
)
# Patch the stub used by the API method.
inner_api_calls = client._table_data_client._inner_api_calls
inner_api_calls["mutate_rows"] = mock.Mock(side_effect=[[response]])
worker = self._make_worker(client, table.name, [row_1, row_2, row_3])
with self.assertRaises(_BigtableRetryableError):
worker._do_mutate_retryable_rows()
statuses = worker.responses_statuses
result = [status.code for status in statuses]
expected_result = [self.SUCCESS, self.RETRYABLE_1, self.NON_RETRYABLE]
self.assertEqual(result, expected_result)
def test_do_mutate_retryable_rows_second_retry(self):
from google.cloud.bigtable.row import DirectRow
from google.cloud.bigtable.table import _BigtableRetryableError
from google.cloud.bigtable_v2.gapic import bigtable_client
from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client
# Setup:
# - Mutate 4 rows.
# - First try results:
# [success, retryable, non-retryable, retryable]
# Action:
# - Second try should re-attempt the 'retryable' rows.
# Expectation:
# - After second try:
# [success, success, non-retryable, retryable]
# - One of the rows tried second time returns retryable error code,
# so expect a raise.
# - Exception contains response whose index should be '3' even though
# only two rows were retried.
data_api = bigtable_client.BigtableClient(mock.Mock())
table_api = bigtable_table_admin_client.BigtableTableAdminClient(mock.Mock())
credentials = _make_credentials()
client = self._make_client(
project="project-id", credentials=credentials, admin=True
)
client._table_data_client = data_api
client._table_admin_client = table_api
instance = client.instance(instance_id=self.INSTANCE_ID)
table = self._make_table(self.TABLE_ID, instance)
row_1 = DirectRow(row_key=b"row_key", table=table)
row_1.set_cell("cf", b"col", b"value1")
row_2 = DirectRow(row_key=b"row_key_2", table=table)
row_2.set_cell("cf", b"col", b"value2")
row_3 = DirectRow(row_key=b"row_key_3", table=table)
row_3.set_cell("cf", b"col", b"value3")
row_4 = DirectRow(row_key=b"row_key_4", table=table)
row_4.set_cell("cf", b"col", b"value4")
response = self._make_responses([self.SUCCESS, self.RETRYABLE_1])
# Patch the stub used by the API method.
inner_api_calls = client._table_data_client._inner_api_calls
inner_api_calls["mutate_rows"] = mock.Mock(side_effect=[[response]])
worker = self._make_worker(client, table.name, [row_1, row_2, row_3, row_4])
worker.responses_statuses = self._make_responses_statuses(
[self.SUCCESS, self.RETRYABLE_1, self.NON_RETRYABLE, self.RETRYABLE_2]
)
with self.assertRaises(_BigtableRetryableError):
worker._do_mutate_retryable_rows()
statuses = worker.responses_statuses
result = [status.code for status in statuses]
expected_result = [
self.SUCCESS,
self.SUCCESS,
self.NON_RETRYABLE,
self.RETRYABLE_1,
]
self.assertEqual(result, expected_result)
def test_do_mutate_retryable_rows_second_try(self):
from google.cloud.bigtable.row import DirectRow
from google.cloud.bigtable_v2.gapic import bigtable_client
from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client
# Setup:
# - Mutate 4 rows.
# - First try results:
# [success, retryable, non-retryable, retryable]
# Action:
# - Second try should re-attempt the 'retryable' rows.
# Expectation:
# - After second try:
# [success, non-retryable, non-retryable, success]
data_api = bigtable_client.BigtableClient(mock.Mock())
table_api = bigtable_table_admin_client.BigtableTableAdminClient(mock.Mock())
credentials = _make_credentials()
client = self._make_client(
project="project-id", credentials=credentials, admin=True
)
client._table_data_client = data_api
client._table_admin_client = table_api
instance = client.instance(instance_id=self.INSTANCE_ID)
table = self._make_table(self.TABLE_ID, instance)
row_1 = DirectRow(row_key=b"row_key", table=table)
row_1.set_cell("cf", b"col", b"value1")
row_2 = DirectRow(row_key=b"row_key_2", table=table)
row_2.set_cell("cf", b"col", b"value2")
row_3 = DirectRow(row_key=b"row_key_3", table=table)
row_3.set_cell("cf", b"col", b"value3")
row_4 = DirectRow(row_key=b"row_key_4", table=table)
row_4.set_cell("cf", b"col", b"value4")
response = self._make_responses([self.NON_RETRYABLE, self.SUCCESS])
# Patch the stub used by the API method.
inner_api_calls = client._table_data_client._inner_api_calls
inner_api_calls["mutate_rows"] = mock.Mock(side_effect=[[response]])
worker = self._make_worker(client, table.name, [row_1, row_2, row_3, row_4])
worker.responses_statuses = self._make_responses_statuses(
[self.SUCCESS, self.RETRYABLE_1, self.NON_RETRYABLE, self.RETRYABLE_2]
)
statuses = worker._do_mutate_retryable_rows()
result = [status.code for status in statuses]
expected_result = [
self.SUCCESS,
self.NON_RETRYABLE,
self.NON_RETRYABLE,
self.SUCCESS,
]
self.assertEqual(result, expected_result)
def test_do_mutate_retryable_rows_second_try_no_retryable(self):
from google.cloud.bigtable.row import DirectRow
from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client
# Setup:
# - Mutate 2 rows.
# - First try results: [success, non-retryable]
# Action:
# - Second try has no row to retry.
# Expectation:
# - After second try: [success, non-retryable]
table_api = mock.create_autospec(
bigtable_table_admin_client.BigtableTableAdminClient
)
credentials = _make_credentials()
client = self._make_client(
project="project-id", credentials=credentials, admin=True
)
client._table_admin_client = table_api
instance = client.instance(instance_id=self.INSTANCE_ID)
table = self._make_table(self.TABLE_ID, instance)
row_1 = DirectRow(row_key=b"row_key", table=table)
row_1.set_cell("cf", b"col", b"value1")
row_2 = DirectRow(row_key=b"row_key_2", table=table)
row_2.set_cell("cf", b"col", b"value2")
worker = self._make_worker(client, table.name, [row_1, row_2])
worker.responses_statuses = self._make_responses_statuses(
[self.SUCCESS, self.NON_RETRYABLE]
)
statuses = worker._do_mutate_retryable_rows()
result = [status.code for status in statuses]
expected_result = [self.SUCCESS, self.NON_RETRYABLE]
self.assertEqual(result, expected_result)
def test_do_mutate_retryable_rows_mismatch_num_responses(self):
from google.cloud.bigtable.row import DirectRow
from google.cloud.bigtable_v2.gapic import bigtable_client
from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client
data_api = bigtable_client.BigtableClient(mock.Mock())
table_api = bigtable_table_admin_client.BigtableTableAdminClient(mock.Mock())
credentials = _make_credentials()
client = self._make_client(
project="project-id", credentials=credentials, admin=True
)
client._table_data_client = data_api
client._table_admin_client = table_api
instance = client.instance(instance_id=self.INSTANCE_ID)
table = self._make_table(self.TABLE_ID, instance)
row_1 = DirectRow(row_key=b"row_key", table=table)
row_1.set_cell("cf", b"col", b"value1")
row_2 = DirectRow(row_key=b"row_key_2", table=table)
row_2.set_cell("cf", b"col", b"value2")
response = self._make_responses([self.SUCCESS])
# Patch the stub used by the API method.
inner_api_calls = client._table_data_client._inner_api_calls
inner_api_calls["mutate_rows"] = mock.Mock(side_effect=[[response]])
worker = self._make_worker(client, table.name, [row_1, row_2])
with self.assertRaises(RuntimeError):
worker._do_mutate_retryable_rows()
class Test__create_row_request(unittest.TestCase):
def _call_fut(
self,
table_name,
start_key=None,
end_key=None,
filter_=None,
limit=None,
end_inclusive=False,
app_profile_id=None,
row_set=None,
):
from google.cloud.bigtable.table import _create_row_request
return _create_row_request(
table_name,
start_key=start_key,
end_key=end_key,
filter_=filter_,
limit=limit,
end_inclusive=end_inclusive,
app_profile_id=app_profile_id,
row_set=row_set,
)
def test_table_name_only(self):
table_name = "table_name"
result = self._call_fut(table_name)
expected_result = _ReadRowsRequestPB(table_name=table_name)
self.assertEqual(result, expected_result)
def test_row_range_row_set_conflict(self):
with self.assertRaises(ValueError):
self._call_fut(None, end_key=object(), row_set=object())
def test_row_range_start_key(self):
table_name = "table_name"
start_key = b"start_key"
result = self._call_fut(table_name, start_key=start_key)
expected_result = _ReadRowsRequestPB(table_name=table_name)
expected_result.rows.row_ranges.add(start_key_closed=start_key)
self.assertEqual(result, expected_result)
def test_row_range_end_key(self):
table_name = "table_name"
end_key = b"end_key"
result = self._call_fut(table_name, end_key=end_key)
expected_result = _ReadRowsRequestPB(table_name=table_name)
expected_result.rows.row_ranges.add(end_key_open=end_key)
self.assertEqual(result, expected_result)
def test_row_range_both_keys(self):
table_name = "table_name"
start_key = b"start_key"
end_key = b"end_key"
result = self._call_fut(table_name, start_key=start_key, end_key=end_key)
expected_result = _ReadRowsRequestPB(table_name=table_name)
expected_result.rows.row_ranges.add(
start_key_closed=start_key, end_key_open=end_key
)
self.assertEqual(result, expected_result)
def test_row_range_both_keys_inclusive(self):
table_name = "table_name"
start_key = b"start_key"
end_key = b"end_key"
result = self._call_fut(
table_name, start_key=start_key, end_key=end_key, end_inclusive=True
)
expected_result = _ReadRowsRequestPB(table_name=table_name)
expected_result.rows.row_ranges.add(
start_key_closed=start_key, end_key_closed=end_key
)
self.assertEqual(result, expected_result)
def test_with_filter(self):
from google.cloud.bigtable.row_filters import RowSampleFilter
table_name = "table_name"
row_filter = RowSampleFilter(0.33)
result = self._call_fut(table_name, filter_=row_filter)
expected_result = _ReadRowsRequestPB(
table_name=table_name, filter=row_filter.to_pb()
)
self.assertEqual(result, expected_result)
def test_with_limit(self):
table_name = "table_name"
limit = 1337
result = self._call_fut(table_name, limit=limit)
expected_result = _ReadRowsRequestPB(table_name=table_name, rows_limit=limit)
self.assertEqual(result, expected_result)
def test_with_row_set(self):
from google.cloud.bigtable.row_set import RowSet
table_name = "table_name"
row_set = RowSet()
result = self._call_fut(table_name, row_set=row_set)
expected_result = _ReadRowsRequestPB(table_name=table_name)
self.assertEqual(result, expected_result)
def test_with_app_profile_id(self):
table_name = "table_name"
limit = 1337
app_profile_id = "app-profile-id"
result = self._call_fut(table_name, limit=limit, app_profile_id=app_profile_id)
expected_result = _ReadRowsRequestPB(
table_name=table_name, rows_limit=limit, app_profile_id=app_profile_id
)
self.assertEqual(result, expected_result)
def _ReadRowsRequestPB(*args, **kw):
from google.cloud.bigtable_v2.proto import bigtable_pb2 as messages_v2_pb2
return messages_v2_pb2.ReadRowsRequest(*args, **kw)
class Test_ClusterState(unittest.TestCase):
def test___eq__(self):
from google.cloud.bigtable.enums import Table as enum_table
from google.cloud.bigtable.table import ClusterState
READY = enum_table.ReplicationState.READY
state1 = ClusterState(READY)
state2 = ClusterState(READY)
self.assertEqual(state1, state2)
def test___eq__type_differ(self):
from google.cloud.bigtable.enums import Table as enum_table
from google.cloud.bigtable.table import ClusterState
READY = enum_table.ReplicationState.READY
state1 = ClusterState(READY)
state2 = object()
self.assertNotEqual(state1, state2)
def test___ne__same_value(self):
from google.cloud.bigtable.enums import Table as enum_table
from google.cloud.bigtable.table import ClusterState
READY = enum_table.ReplicationState.READY
state1 = ClusterState(READY)
state2 = ClusterState(READY)
comparison_val = state1 != state2
self.assertFalse(comparison_val)
def test___ne__(self):
from google.cloud.bigtable.enums import Table as enum_table
from google.cloud.bigtable.table import ClusterState
READY = enum_table.ReplicationState.READY
INITIALIZING = enum_table.ReplicationState.INITIALIZING
state1 = ClusterState(READY)
state2 = ClusterState(INITIALIZING)
self.assertNotEqual(state1, state2)
def test__repr__(self):
from google.cloud.bigtable.enums import Table as enum_table
from google.cloud.bigtable.table import ClusterState
STATE_NOT_KNOWN = enum_table.ReplicationState.STATE_NOT_KNOWN
INITIALIZING = enum_table.ReplicationState.INITIALIZING
PLANNED_MAINTENANCE = enum_table.ReplicationState.PLANNED_MAINTENANCE
UNPLANNED_MAINTENANCE = enum_table.ReplicationState.UNPLANNED_MAINTENANCE
READY = enum_table.ReplicationState.READY
replication_dict = {
STATE_NOT_KNOWN: "STATE_NOT_KNOWN",
INITIALIZING: "INITIALIZING",
PLANNED_MAINTENANCE: "PLANNED_MAINTENANCE",
UNPLANNED_MAINTENANCE: "UNPLANNED_MAINTENANCE",
READY: "READY",
}
self.assertEqual(
str(ClusterState(STATE_NOT_KNOWN)), replication_dict[STATE_NOT_KNOWN]
)
self.assertEqual(
str(ClusterState(INITIALIZING)), replication_dict[INITIALIZING]
)
self.assertEqual(
str(ClusterState(PLANNED_MAINTENANCE)),
replication_dict[PLANNED_MAINTENANCE],
)
self.assertEqual(
str(ClusterState(UNPLANNED_MAINTENANCE)),
replication_dict[UNPLANNED_MAINTENANCE],
)
self.assertEqual(str(ClusterState(READY)), replication_dict[READY])
self.assertEqual(
ClusterState(STATE_NOT_KNOWN).replication_state, STATE_NOT_KNOWN
)
self.assertEqual(ClusterState(INITIALIZING).replication_state, INITIALIZING)
self.assertEqual(
ClusterState(PLANNED_MAINTENANCE).replication_state, PLANNED_MAINTENANCE
)
self.assertEqual(
ClusterState(UNPLANNED_MAINTENANCE).replication_state, UNPLANNED_MAINTENANCE
)
self.assertEqual(ClusterState(READY).replication_state, READY)
def _ReadRowsResponseCellChunkPB(*args, **kw):
from google.cloud.bigtable_v2.proto import bigtable_pb2 as messages_v2_pb2
family_name = kw.pop("family_name")
qualifier = kw.pop("qualifier")
message = messages_v2_pb2.ReadRowsResponse.CellChunk(*args, **kw)
message.family_name.value = family_name
message.qualifier.value = qualifier
return message
def _ReadRowsResponsePB(*args, **kw):
from google.cloud.bigtable_v2.proto import bigtable_pb2 as messages_v2_pb2
return messages_v2_pb2.ReadRowsResponse(*args, **kw)
def _mutate_rows_request_pb(*args, **kw):
from google.cloud.bigtable_v2.proto import bigtable_pb2 as data_messages_v2_pb2
return data_messages_v2_pb2.MutateRowsRequest(*args, **kw)
class _MockReadRowsIterator(object):
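    """Iterator test double that yields each supplied response once."""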
def __init__(self, *values):
self.iter_values = iter(values)
def next(self):
return next(self.iter_values)
__next__ = next
class _MockFailureIterator_1(object):
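    """Iterator test double that fails immediately with DeadlineExceeded."""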
def next(self):
raise DeadlineExceeded("Failed to read from server")
__next__ = next
class _MockFailureIterator_2(object):
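    """Iterator test double yielding one response, then DeadlineExceeded."""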
def __init__(self, *values):
self.iter_values = values[0]
self.calls = 0
def next(self):
self.calls += 1
if self.calls == 1:
return self.iter_values[0]
else:
raise DeadlineExceeded("Failed to read from server")
__next__ = next
class _ReadRowsResponseV2(object):
def __init__(self, chunks, last_scanned_row_key=""):
self.chunks = chunks
self.last_scanned_row_key = last_scanned_row_key
def _TablePB(*args, **kw):
from google.cloud.bigtable_admin_v2.proto import table_pb2 as table_v2_pb2
return table_v2_pb2.Table(*args, **kw)
def _ColumnFamilyPB(*args, **kw):
from google.cloud.bigtable_admin_v2.proto import table_pb2 as table_v2_pb2
return table_v2_pb2.ColumnFamily(*args, **kw)
def _ClusterStatePB(replication_state):
from google.cloud.bigtable_admin_v2.proto import table_pb2 as table_v2_pb2
return table_v2_pb2.Table.ClusterState(replication_state=replication_state)
def _read_rows_retry_exception(exc):
return isinstance(exc, DeadlineExceeded)
|
tseaver/google-cloud-python
|
bigtable/tests/unit/test_table.py
|
Python
|
apache-2.0
| 69,707
|
import os
from flask import Blueprint, render_template, session, redirect, url_for, request, abort, flash
from flask import current_app, send_from_directory
from itsdangerous import URLSafeSerializer, BadSignature
from werkzeug import secure_filename
from flask.ext.login import current_user, login_user, logout_user
from flask.ext.mail import Message
from app.helpers import login_required, allowed_file
from app import db, login_manager
from .models import Donation
from .forms import DonationUploadForm, PledgingForm
donors = Blueprint(
'donors',
__name__,
template_folder='templates',
url_prefix='/donors'
)
@donors.route('/uploads/<filename>/')
def uploads(filename):
return url_for('static', filename='uploads/' + filename)
# return send_from_directory(app.config['UPLOAD_FOLDER'], filename)
@donors.route('/', methods=['GET', 'POST'])
def index():
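    """List the current user's donations."""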
error = None
donations = db.session.query(Donation).filter(Donation.user_id==current_user.id).all()
return render_template('donors/index.html', error=error, donations=donations)
@donors.route('/create/', methods=['GET', 'POST'])
def create():
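    """Upload a donation file and record it for the current user."""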
error = None
    form = DonationUploadForm()
if request.method == 'POST' and form.validate_on_submit():
file = request.files['upload_file']
if file and allowed_file(file.filename):
filename = secure_filename(file.filename)
file.save(os.path.join(current_app.config['UPLOAD_FOLDER'], filename))
donation = Donation()
form.populate_obj(donation)
donation.type = 2
donation.filename = filename
donation.user_id = current_user.id
db.session.add(donation)
db.session.commit()
return redirect(url_for('donors.index'))
return render_template('donors/create.html', form=form, error=error)
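# Note: the upload flow above assumes the application config defines
# UPLOAD_FOLDER, e.g. app.config['UPLOAD_FOLDER'] = '/path/to/static/uploads'
# (an illustrative value, not part of this blueprint).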
@donors.route('/pledging/', methods=['GET', 'POST'])
def pledging():
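    """Record a pledged donation for the current user."""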
error = None
    form = PledgingForm()
if request.method == 'POST' and form.validate_on_submit():
donation = Donation()
form.populate_obj(donation)
donation.type = 1
donation.user_id = current_user.id
db.session.add(donation)
db.session.commit()
return redirect(url_for('donors.index'))
return render_template('donors/pledging.html', form=form, error=error)
@donors.errorhandler(413)
def error_handler_413(e):
return render_template('images/413.html'), 413
@donors.route('/delete/', methods=['POST'])
@donors.route('/delete/<id>', methods=['POST'])
@login_required
def delete(id=None):
"""Delete an uploaded file."""
donation = db.session.query(Donation).get(id)
    if donation and donation.filename:
        # Remove the stored upload from disk before deleting the record.
        try:
            filename = secure_filename(donation.filename)
            os.remove(os.path.join(current_app.config['UPLOAD_FOLDER'], filename))
        except OSError:
            pass
db.session.delete(donation)
db.session.commit()
return redirect(url_for('.index'))
|
teamgivn/givnapp
|
app/blueprints/donors/views.py
|
Python
|
mit
| 3,057
|
#! /usr/bin/python3
import curses as c
from pyotp import TOTP
from math import floor
from sys import exit
import os
import bcrypt
import json
from aes import AESCipher
import base64
from time import strftime
from getpass import getuser
cotp_dir = os.path.dirname(os.path.realpath(__file__))
user_name = str(getuser())
def init():
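    """Initialise the curses screen and the global window dimensions."""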
global scr, height, width
scr = c.initscr()
height, width = scr.getmaxyx()
c.noecho()
c.cbreak()
scr.border(0)
scr.keypad(True)
scr.clear()
c.curs_set(False)
c.start_color()
c.use_default_colors()
def prompt(text, prompt_type='notify', choice_1='Okay', choice_2='Cancel', textbox_type='text'):
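    """Draw a modal dialog and return the user's choice or typed text."""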
text_input, button_1, button_2 = None, None, None
# Configure window
if prompt_type == 'notify':
wh = 5
ww = len(text) + 4 if len(text) + 4 > len(choice_1) + 12 else len(choice_1) + 12
button_1 = choice_1
elif prompt_type == 'choice':
wh = 5
ww = len(text) + 4 if len(text) + 4 > len(choice_1) + len(choice_2) + 9 else len(choice_1) + len(choice_2) + 11
button_1, button_2 = choice_1, choice_2
elif prompt_type == 'text':
wh = 5
ww = len(text) + 10
text_input = ''
# Window box
wy, wx = floor(height / 2 - wh / 2), floor(width / 2 - ww / 2)
window_box(wx, wy, ww, wh, text)
# User input
if prompt_type == 'notify':
scr.addstr(wy + 3, wx + floor(ww / 2 - ((len(button_1) + 2) / 2)) - 1, '[ ' + button_1 + ' ]', c.A_REVERSE + c.A_UNDERLINE + c.A_BOLD)
while True:
key = scr.getch()
if key == c.KEY_ENTER or key == 10:
return
elif prompt_type == 'choice':
selection = 0
while True:
scr.addstr(wy + 3, wx + floor(ww / 2) - floor((len(button_1) + len(button_2) + 11) / 2) + 1, '[ ' + button_1 + ' ]', c.A_REVERSE + (c.A_UNDERLINE + c.A_BOLD if selection == 0 else c.A_NORMAL))
scr.addstr(wy + 3, wx + floor(ww / 2) - floor((len(button_1) + len(button_2) + 11) / 2) + len(button_1) + 6, '[ ' + button_2 + ' ]', c.A_REVERSE + (c.A_UNDERLINE + c.A_BOLD if selection == 1 else c.A_NORMAL))
key = scr.getch()
if key == c.KEY_RIGHT:
selection = 1
elif key == c.KEY_LEFT:
selection = 0
elif key == c.KEY_ENTER or key == 10:
return choice_1 if selection == 0 else choice_2
elif prompt_type == 'text':
while True:
if textbox_type == 'text':
visible_input = text_input[-(ww - 9):]
elif textbox_type == 'password':
visible_input = '*' * len(text_input[-(ww - 9):])
visible_input += '_'
scr.addstr(wy + 3, wx + 2, '[ ' + visible_input + ' ' * (ww - len(visible_input) - 8) + ' ]', c.A_REVERSE)
key = scr.getch()
if key == c.KEY_BACKSPACE:
text_input = text_input[:-1]
elif key == c.KEY_ENTER or key == 10:
return text_input
else:
text_input += chr(key)
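# Example prompt() usage (sketch):
#   prompt('Saved')                           -> notification, returns None
#   prompt('Delete?', 'choice', 'Yes', 'No')  -> returns 'Yes' or 'No'
#   prompt('Name', 'text')                    -> returns the typed string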
def window_box(wx=0, wy=0, ww=2, wh=2, title=''):
scr.clear()
background(c.COLOR_BLUE)
for y in range(wy, wy + wh):
scr.addstr(y, wx, ' ' * ww, c.A_REVERSE)
for y in range(wy + 1, wy + wh + 1):
scr.addstr(y, wx + ww, ' ')
scr.addstr(wy + wh, wx + 1, ' ' * ww, c.A_HORIZONTAL)
scr.addstr(wy + 1, wx + floor(ww / 2 - len(title) / 2), title, c.A_REVERSE + c.A_BOLD)
def load_files():
global config, services
if not os.path.exists(cotp_dir + '/users'):
os.makedirs(cotp_dir + '/users')
if not os.path.isfile(cotp_dir + '/users/' + user_name + '.json'):
with open(cotp_dir + '/users/' + user_name + '.json', 'w') as services_file:
services_file.write('{"cipher": "", "services": []}')
with open(cotp_dir + '/config.json') as config_file:
config = json.load(config_file)
with open(cotp_dir + '/users/' + user_name + '.json') as services_file:
services = json.load(services_file)
def unlock():
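    """Set (first run) or verify the master password and build the AES cipher."""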
global aescipher
if services['cipher'] == '':
cipher = prompt('Enter new password', 'text', textbox_type='password')
services['cipher'] = str(bcrypt.hashpw(cipher.encode('utf-8'), bcrypt.gensalt(12)))[2:-1]
with open(cotp_dir + '/users/' + user_name + '.json', 'w') as services_file:
json.dump(services, services_file)
aescipher = AESCipher(key=cipher)
return
else:
while True:
cipher = prompt('Enter your password', 'text', textbox_type='password')
if bcrypt.hashpw(cipher.encode('utf-8'), services['cipher'].encode('utf-8')) == services['cipher'].encode('utf-8'):
aescipher = AESCipher(key=cipher)
return
else:
prompt('Invalid password')
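# For reference, the bcrypt check above works because hashpw() re-hashes the
# candidate password with the salt embedded in the stored hash (sketch using
# the same library):
#   stored = bcrypt.hashpw(b'secret', bcrypt.gensalt(12))
#   bcrypt.hashpw(b'secret', stored) == stored   # True only for 'secret'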
def background(color):
c.init_pair(1, color, color)
scr.addstr(0, 0, ' ' * (width * height - 1), c.color_pair(1))
def list_box(item_list, x, y, w, h):
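    """Render a scrollable list and return the index of the selected item."""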
first, selected = 0, 0
while True:
viewable_list = item_list[first:first + h + 1]
for i in range(0, len(viewable_list)):
scr.addstr(y + i, x, viewable_list[i][:w], c.A_REVERSE + (c.A_UNDERLINE + c.A_BOLD if i == selected - first else c.A_NORMAL))
scr.addstr(y + i, x + len(viewable_list[i][:w]), ' ' * (w - len(viewable_list[i])), c.A_REVERSE)
key = scr.getch()
if key == c.KEY_UP and selected > 0:
selected -= 1
if selected == first and first > 0:
first -= 1
elif key == c.KEY_DOWN and selected < len(item_list) - 1:
selected += 1
if selected == first + h + 1:
first += 1
elif key == c.KEY_ENTER or key == 10:
return selected
def main_menu():
menu = ['My services', 'Add a service', 'Remove a service', 'Change password', 'Backup', 'Exit application']
maxh = len(menu) + 4
ww, wh = 36, maxh if maxh < height - 3 else height - 3
wy, wx = floor(height / 2 - wh / 2), floor(width / 2 - ww / 2)
while True:
window_box(wx, wy, ww, wh, 'Curses One Time Password Manager')
sel = list_box(menu, wx + 2, wy + 3, ww - 4, wh - 5)
if menu[sel] == 'Exit application':
return
elif menu[sel] == 'My services':
list_tokens('show')
elif menu[sel] == 'Add a service':
add_a_service()
elif menu[sel] == 'Remove a service':
list_tokens('remove')
elif menu[sel] == 'Change password':
change_master_password()
elif menu[sel] == 'Backup':
backup()
def add_a_service():
new_name = prompt('Service name', 'text')
new_totp = prompt('Service key', 'text')
new_service = {}
new_totp = new_totp.replace(' ', '')
new_service['name'] = str(aescipher.encrypt(new_name))[2:-1]
new_service['totp'] = str(aescipher.encrypt(new_totp))[2:-1]
services['services'].append(new_service)
with open(cotp_dir + '/users/' + user_name + '.json', 'w') as services_file:
json.dump(services, services_file)
load_files()
def backup():
backup_choice = prompt('Choose action', 'choice', 'Import', 'Export')
if backup_choice == 'Export':
        backup_dir = prompt('Enter the directory to save the backup file to', 'text')
        if backup_dir.endswith('/'):
            backup_dir = backup_dir[:-1]
backup_file_name = backup_dir + '/COTP_BACKUP_' + str(strftime("%d-%m-%Y-%H-%M-%S"))
backup_content = json.dumps(services)
backup_content = str(base64.b64encode(backup_content.encode('utf-8')))[2:-1]
with open(backup_file_name, 'w') as backup_file:
backup_file.write(backup_content)
prompt('Backup successful')
elif backup_choice == 'Import':
global aescipher
import_file = prompt('Enter full path to backup file', 'text')
with open(import_file) as backup_file:
backup_services = backup_file.read()
backup_services = json.loads(str(base64.b64decode(backup_services))[2:-1])
import_password = prompt('Enter backup file password', 'text', textbox_type='password')
if bcrypt.hashpw(import_password.encode('utf-8'), backup_services['cipher'].encode('utf-8')) == backup_services['cipher'].encode('utf-8'):
backup_aescipher = AESCipher(key=import_password)
for bs in backup_services['services']:
new_service = {}
new_service['name'] = str(aescipher.encrypt(backup_aescipher.decrypt(bs['name'])))[2:-1]
new_service['totp'] = str(aescipher.encrypt(backup_aescipher.decrypt(bs['totp'])))[2:-1]
services['services'].append(new_service)
with open(cotp_dir + '/users/' + user_name + '.json', 'w') as services_file:
json.dump(services, services_file)
else:
prompt('Invalid backup password')
def change_master_password():
new_password = prompt('Enter new password', 'text', textbox_type='password')
repeat_new_password = prompt('Repeat new password', 'text', textbox_type='password')
if new_password == repeat_new_password:
services['cipher'] = str(bcrypt.hashpw(new_password.encode('utf-8'), bcrypt.gensalt(12)))[2:-1]
global aescipher
old_aescipher = aescipher
aescipher = AESCipher(key=new_password)
for s in range(0, len(services['services'])):
services['services'][s]['name'] = str(aescipher.encrypt(old_aescipher.decrypt(services['services'][s]['name'])))[2:-1]
services['services'][s]['totp'] = str(aescipher.encrypt(old_aescipher.decrypt(services['services'][s]['totp'])))[2:-1]
with open(cotp_dir + '/users/' + user_name + '.json', 'w') as services_file:
json.dump(services, services_file)
        prompt('Password changed successfully')
else:
prompt('Passwords don\'t match')
def list_tokens(action='show'):
service_names = [t['name'] for t in services['services']]
for s in range(0, len(service_names)):
service_names[s] = aescipher.decrypt(service_names[s])
service_names.append('Back')
maxh = len(service_names) + 4
ww, wh = 20, maxh if maxh < height - 3 else height - 3
wy, wx = floor(height / 2 - wh / 2), floor(width / 2 - ww / 2)
while True:
if action == 'show':
window_box(wx, wy, ww, wh, 'My services')
elif action == 'remove':
window_box(wx, wy, ww, wh, 'Remove service')
sel = list_box(service_names, wx + 2, wy + 3, ww - 4, wh - 5)
if sel == len(service_names) - 1:
return
else:
if action == 'show':
while True:
service_token = str(aescipher.decrypt(services['services'][sel]['totp']))
totp = TOTP(service_token)
totp_token = totp.now()
totp_action = prompt(str(totp_token)[:3] + ' ' + str(totp_token)[3:], 'choice', 'Refresh', 'Back')
if totp_action == 'Back':
return
elif action == 'remove':
confirm = prompt('Delete service ' + str(aescipher.decrypt(services['services'][sel]['name'])) + '?', 'choice', 'No', 'Yes')
if confirm == 'Yes':
services['services'].pop(sel)
with open(cotp_dir + '/users/' + user_name + '.json', 'w') as services_file:
json.dump(services, services_file)
return
else:
return
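# --- Illustrative helper (not part of the original script) ----------------
# Backups written by backup() are base64-encoded JSON; this is a hypothetical
# standalone reader for inspecting one. It relies only on the json/base64
# imports already used elsewhere in this file.
def read_backup_file(path):
    with open(path) as backup_file:
        return json.loads(base64.b64decode(backup_file.read()).decode('utf-8'))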
def main():
init()
load_files()
global aescipher
unlock()
main_menu()
c.endwin()
if __name__ == '__main__':
main()
|
filiparag/cotp
|
cotp.py
|
Python
|
gpl-3.0
| 11,957
|
import random
import string
def _pad_message(message):
""" A PKCS#7 padding implementation for the end of the plaintext message.
Args:
message (str): The full plaintext message.
Returns:
str: A PKCS#7 padded message.
"""
pad = len(message) % 5
    if pad == 0:
        message += 'ZZZZZ'
    elif pad == 1:
        message += 'YYYY'
    elif pad == 2:
        message += 'XXX'
    elif pad == 3:
        message += 'WW'
else:
message += 'V'
return message
def _unpad_message(message):
""" Remove the padding off a decrypted ciphertext message.
Args:
message (str): The full decrypted ciphertext message.
Returns:
str: The decrypted message without PKCS#7 padding.
"""
pad = message[-1:]
if 'V' in pad:
message = message[:-1]
elif 'W' in pad:
message = message[:-2]
elif 'X' in pad:
message = message[:-3]
elif 'Y' in pad:
message = message[:-4]
elif 'Z' in pad:
message = message[:-5]
return message
def _create_iv(n):
""" Create a random initialization vector.
Prepend a plaintext message with 5 random characters in the same ciphertext
character base as the rest of the ciphertext.
Args:
n (int): The number of random characters to generate.
Returns:
str: An initialization vector string.
"""
r = random.SystemRandom()
return "".join(r.choice(string.uppercase) for i in xrange(n))
def encrypt(message, alg, deck, n):
""" Encrypt a plaintext message.
Args:
message (str): The plaintext message.
alg (object): The specific cipher object.
deck (list): A full 52-card deck. State is maintained.
n (int): The number of initialization vector characters.
Returns:
str: An encrypted message prepended with an initialization vector.
"""
ct = []
iv = _create_iv(n)
for char in message:
        if char not in "ABCDEFGHIJKLMNOPQRSTUVWXYZ":
message = message.replace(char, '')
# mixing the deck with the IV (uses the right alphabet/pile)
for char in iv:
alg.prng(deck, char)
msg = iv + message
message = _pad_message(msg)
message = message[n:] # strip iv for encryption
# encrypt the plaintext message sans IV
for char in message:
ct.append(alg.prng(deck, char))
return list(iv) + ct
def decrypt(message, alg, deck, n):
""" Decrypt a ciphertext message.
Args:
message (str): The ciphertext message.
alg (object): The specific cipher object.
deck (list): A full 52-card deck. State is maintained.
n (int): The number of characters of the initialization vector.
Returns:
str: An decrypted message without the initialization vector.
"""
pt = []
iv = message[:n]
message = message[n:]
# mixing the deck with the IV (uses the right alphabet/pile)
for char in iv:
alg.prng(deck, char)
# decrypt the ciphertext message sans IV
for char in message:
pt.append(alg.prng(deck, char, method='decrypt'))
return _unpad_message(pt)
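# --- Usage sketch (not part of the original module) ------------------------
# A minimal round-trip check of the padding helpers above. The cipher object
# ('alg') and deck needed by encrypt()/decrypt() are project specific, so
# only the self-contained pad/unpad pair is exercised here.
if __name__ == '__main__':
    padded = _pad_message('HELLOWORLD')
    assert len(padded) % 5 == 0
    assert _unpad_message(padded) == 'HELLOWORLD'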
|
atoponce/cardciphers
|
chaocipher/encoder/__init__.py
|
Python
|
gpl-3.0
| 3,178
|
# OpenCMISS Zinc widgets Python package initialisation file.
__version__ = "1.0.0"
|
OpenCMISS-Bindings/ZincPythonTools
|
opencmiss/zincwidgets/__init__.py
|
Python
|
mpl-2.0
| 87
|
"""
Module containing the CanonicalGaussian object
and some helper functions.
@author: scj robertson
@since: 22/05/2016
"""
import numpy as np
import numbers
class CanonicalGaussian:
'''
Class for representing a multivariate Gaussian distribution,
given a partitioned precision matrix and information vector.
This representation makes use of matrix multiplication
to align variables and expand scopes. This is computationally
expensive, but conceptually easy. Not a great representation, but
I couldn't find an exisiting canonical form representation in python.
Multiplication and division have been overloaded, but this in general
quite confusing as it will only be consistent when applied to
CanonicalGaussians. Rather replace it with multiply() and divide()
methods to reduce error.
This class doesn't yet handle any value errors or check
dimensional consistency.
Parameters
----------
vars_ : list
A list of variables, integer values representing a variable.
dims : list
The respective list of the variables' dimensions.
info : (d, 1) ndarray
The information vector.
prec : (d, d) ndarray
The precision matrix.
norm : float
The normalisation constant.
Methods
----------
marginalize
Marginalizes out the given variables and returns a new
distribution.
introduce_evidence
Sets a subset of given variables into a given state.
Example
----------
>>> print('To be completed, look at tracking.ipynb for now.')
'''
def __init__(self, vars_, dims, info, prec, norm):
self._vars = list(vars_)
self._dims = list(dims)
self._info = np.array(info).reshape((sum(dims), 1))
self._prec = np.array(prec)
self._norm = np.float64(norm)
if sorted(vars_) != vars_:
self._order()
def _order(self):
'''
Reorders the arrays so that the variables appear
in ascending order of their numeric values.
'''
v_0, d_0, c_0 = [], [], []
c_r = np.cumsum(([0] + self._dims[:-1])).tolist()
N = len(self._vars)
for i in np.arange(0, N):
j = np.argmin(self._vars)
v_0.append(self._vars.pop(j))
d_0.append(self._dims.pop(j))
c_0.append(c_r.pop(j))
r_0 = np.cumsum(([0] + d_0[:-1])).tolist()
P = block_permutation(r_0, c_0, d_0)
self._vars = v_0
self._dims = d_0
self._prec = (P)@(self._prec)@(P.T)
self._info = (P)@(self._info)
def _rearrange(self, vars_):
'''
Moves the given variables to the end of the
        partition matrix. Precomputation for marginalization
        and introducing evidence.
Parameters
----------
vars_ : list
The variables that need to be moved.
'''
M = len(vars_)
c_r = np.cumsum(([0] + self._dims[:-1])).tolist()
for i in np.arange(0, M):
j = where(self._vars, vars_[i])
exchange(self._vars, j, -(i+1))
exchange(self._dims, j, -(i+1))
exchange(c_r, j, -(i+1))
r_r = np.cumsum(([0] + self._dims[:-1])).tolist()
P = block_permutation(r_r, c_r, self._dims)
self._prec = (P)@(self._prec)@(P.T)
self._info = (P)@(self._info)
def _expand_scope(self, glob_vars, glob_dims):
'''
        Expands the canonical form's scope to
        accommodate new variables.
Parameters
----------
glob_vars: list
The full set of variables the new scope
            must accommodate.
glob_dims: list
The respective dimensions of the global
variables.
Returns
----------
K_prime: (d, d) ndarray
A new precision matrix with expanded scope.
h_prime: (d, 1) ndarray
A new information vector with expanded scope.
'''
A = np.zeros((sum(glob_dims), sum(self._dims)))
columns = np.cumsum(([0] + self._dims[:-1])).tolist()
glob_rows = np.cumsum(([0] + glob_dims[:-1])).tolist()
rows = []
for i in np.arange(0, len(glob_vars)):
if glob_vars[i] in self._vars:
rows.append(glob_rows[i])
for r, c, d in zip(rows, columns, self._dims):
A[r:r+d, c:c+d] = np.identity(d)
K_prime = (A)@(self._prec)@(A.T)
h_prime = (A)@(self._info)
return K_prime, h_prime
def marginalize(self, vars_):
'''
Marginalize out the given set of variables.
Parameters
----------
vars_ : list
            The variables that are to be summed out. Needs
to be a subset of _vars.
Returns
----------
C : CanonicalGaussian
A new potential with a reduced scope.
'''
self._rearrange(vars_)
M = len(vars_)
X = sum(self._dims[:-M])
vars_ = self._vars[:-M]
dims = self._dims[:-M]
K_xx = self._prec[:X, :X]
K_yy = np.linalg.inv(self._prec[X:, X:])
K_xy = self._prec[:X, X:]
K_yx = self._prec[X:, :X]
h_x = self._info[:X]
h_y = self._info[X:]
K_prime = K_xx - (K_xy)@(K_yy)@(K_yx)
h_prime = h_x - (K_xy)@(K_yy)@(h_y)
g_prime = self._norm + 0.5*( np.log(np.linalg.det(2*np.pi*K_yy)) + (h_y.T)@(K_yy)@(h_y) )
self._order()
return CanonicalGaussian(vars_, dims, h_prime, K_prime, np.float64(g_prime))
def introduce_evidence(self, vars_, ev):
'''
Force a subset of variables into the given state.
Parameters
----------
vars_ : list
            The variables to be fixed to the given state. Need
            to be a subset of _vars.
ev : (d, 1) ndarray
The state to which the given variables will
be set.
'''
self._rearrange(vars_[::-1])
M = len(vars_)
X = sum(self._dims[:-M])
        N = sum(self._dims[-M:])
ev = ev.reshape((N, 1))
self._vars = self._vars[:-M]
self._dims = self._dims[:-M]
K_xx = self._prec[:X, :X]
K_yy = self._prec[X:, X:]
K_xy = self._prec[:X, X:]
h_x = self._info[:X]
h_y = self._info[X:]
self._prec = K_xx
self._info = h_x - (K_xy)@(ev)
self._norm += np.float64((h_y.T)@(ev) - 0.5*(ev.T)@(K_yy)@(ev))
self._order()
def __mul__(self, C):
'''
Overloads multiplication.
Parameters
----------
C : CanonicalGaussian or float
The multiplicand.
Returns
----------
C : CanonicalGaussian
The product of the two Gaussians.
'''
if isinstance(C, CanonicalGaussian):
map_ = dict(zip(self._vars + C._vars, self._dims + C._dims))
glob_vars, glob_dims = list(map_.keys()), list(map_.values())
K_1, h_1 = self._expand_scope(glob_vars, glob_dims)
K_2, h_2 = C._expand_scope(glob_vars, glob_dims)
return CanonicalGaussian(glob_vars, glob_dims, h_1 + h_2, K_1 + K_2, self._norm + C._norm)
elif isinstance(C, numbers.Real):
return CanonicalGaussian(self._vars, self._dims, self._info, self._prec, self._norm + np.log(C))
elif isinstance(C, object):
return C.__mul__(self)
def __rmul__(self, C):
'''
Overloads reverse multiplication.
Parameters
----------
C : CanonicalGaussian or float
The multiplicand.
Returns
----------
C : CanonicalGaussian
The product of the two Gaussians.
'''
return self.__mul__(C)
def __truediv__(self, C):
'''
Overloads division.
Parameters
----------
C : CanonicalGaussian
The divisor.
Returns
----------
C : CanonicalGaussian
The quotient of the two Gaussians.
'''
if isinstance(C, CanonicalGaussian):
map_ = dict(zip(self._vars + C._vars, self._dims + C._dims))
glob_vars = list(map_.keys())
glob_dims = list(map_.values())
K_1, h_1 = self._expand_scope(glob_vars, glob_dims)
K_2, h_2 = C._expand_scope(glob_vars, glob_dims)
return CanonicalGaussian(glob_vars, glob_dims, h_1 - h_2, K_1 - K_2, self._norm - C._norm)
else:
return CanonicalGaussian(self._vars, self._dims, self._info, self._prec, self._norm - np.log(C))
def block_permutation(rows, columns, dimensions):
'''
Creates a block permutation matrix for the given rows
and columns.
Parameters
----------
    rows : list
        The beginning rows of each variable's domain.
    columns : list
        The beginning columns of each variable's domain.
    dimensions : list
        The respective list of the variables' dimensions.
'''
N, M = len(dimensions), sum(dimensions)
P = np.zeros((M, M))
for r, c, d in zip(rows, columns, dimensions):
P[r:r + d, c:c + d] = np.identity(d)
return P
def where(list_, arg):
'''
    A linear scan to find a variable's argument in
    a list. Unlike list.index, this returns None when absent.
Parameters
----------
list_ : list
A list of variables.
arg : int
        The variable's value.
Returns
----------
i : int
The index of the first element equal to arg.
'''
for i in np.arange(0, len(list_)):
if list_[i] == arg:
return i
def exchange(list_, x, y):
'''
A helper function to exchange two positions
in an array.
Parameters
----------
list_ : list
A list.
x : int
The initial position.
y : int
The secondary position.
'''
tmp = list_[x]
list_[x] = list_[y]
list_[y] = tmp
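# --- Usage sketch (not part of the original module, illustrative only) -----
# Two independent 1-D Gaussians in canonical form (K = 1/var, h = K*mean),
# combined with the overloaded product and then reduced with marginalize().
if __name__ == '__main__':
    g0 = CanonicalGaussian([0], [1], [[2.0]], [[1.0]], 0.0)  # mean 2, var 1
    g1 = CanonicalGaussian([1], [1], [[0.5]], [[0.5]], 0.0)  # mean 1, var 2
    joint = g0 * g1                  # scope expands to {0, 1}
    marg = joint.marginalize([1])    # sum variable 1 back out
    print(marg._info, marg._prec)    # recovers g0's parameters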
|
scjrobertson/xRange
|
tracking/canonical_gaussian.py
|
Python
|
gpl-3.0
| 10,114
|
import unittest
from test import *
unittest.main()
|
supersheep/huixiang
|
run_test.py
|
Python
|
mit
| 51
|
import skrf as rf
from numpy import *
import os
import fnmatch
def deembed(deembedtype,temps,rawpath,dpath,pcb1,pcb2,pcb3):
pcbNet1 = rf.Network(pcb1)
if temps == 1:
pcbNet2 = rf.Network(pcb2)
pcbNet3 = rf.Network(pcb3)
# need to figure out way to only perform the open preparation once
    def allopendmbd(pcbNet, embdNet):
        pcbnetsize = shape(pcbNet.s)
        embdnetsize = shape(embdNet.s)
        idmat = identity(pcbnetsize[2])
        onesmat = ones((pcbnetsize[1], pcbnetsize[2]))
        for i in range(1, pcbnetsize[0]):
            A = (pcbNet.s[i, :, :] * idmat + onesmat - idmat) / 2
            embdNet.s[i, :, :] = embdNet.s[i, :, :] / A
            B = (pcbNet.s[i, :, :] - pcbNet.s[i, :, :] * idmat) / 2
            embdNet.s[i, :, :] = embdNet.s[i, :, :] - B
    def oneopendmbd(pcbNet, embdNet):
        pass  # TODO: single-open de-embedding not yet implemented
    def thrudmbd(pcbNet, embdNet):
        pass  # TODO: thru de-embedding not yet implemented
    for dirpath, dirnames, filenames in os.walk(rawpath):
        for name in filenames:
            # endswith() does not support wildcards; fnmatch handles '.s?p'
            if fnmatch.fnmatch(name, '*.s?p'):
                filepath = os.path.join(dirpath, name)  # TODO: de-embed this file
|
joescape/DeembedProject
|
DeembedFiles.py
|
Python
|
gpl-3.0
| 1,036
|
#!/usr/bin/env python
""" Main deployment script for app
"""
from flaskgur import app
if __name__ == '__main__':
if app.debug:
app.run(debug=True, host='0.0.0.0', port=5018)
else:
app.run(host='0.0.0.0', port=5018)
|
chokepoint/flaskgur
|
runserver.py
|
Python
|
gpl-2.0
| 241
|
# -*- coding: utf-8 -*-
import socket
import ibm_db
conn = ibm_db.connect("DATABASE=sample;HOSTNAME=66.3.44.37;PORT=60004;PROTOCOL=TCPIP;UID=cyrus;PWD=cyrus;", "", "")
sql = "SELECT PHONENO FROM EMPLOYEE"
def queryData(s):
result = []
if conn:
try:
stmt = ibm_db.exec_immediate(conn, s)
                dictionary = ibm_db.fetch_both(stmt)  # fetch the results
while dictionary != False:
for key in dictionary:
if key == 0:
result.append(dictionary[key])
dictionary = ibm_db.fetch_both(stmt)
return result
except Exception as ex:
print(ex)
finally:
ibm_db.close(conn)
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect(('127.0.0.1', 9000))
print((s.recv(1024).decode()))
tupleOfquery = ('41588', '41939')
s.send((str(tupleOfquery)).encode())
print((s.recv(1024)).decode())
# res = queryData(sql)
# for data in res:
# s.send((str(data)).encode())
# print((s.recv(1024)).decode())
#s.send('exit'.encode())
s.close()
|
cysuncn/python
|
study/Socket/SocketClient.py
|
Python
|
gpl-3.0
| 1,102
|
#!/usr/bin/env python3
#
# mmgen = Multi-Mode GENerator, a command-line cryptocurrency wallet
# Copyright (C)2013-2022 The MMGen Project <mmgen@tuta.io>
# Licensed under the GNU General Public License, Version 3:
# https://www.gnu.org/licenses
# Public project repositories:
# https://github.com/mmgen/mmgen
# https://gitlab.com/mmgen/mmgen
"""
base_proto.bitcoin.twaddrs: Bitcoin base protocol tracking wallet address list class
"""
from ....util import msg,die
from ....obj import MMGenList
from ....addr import CoinAddr
from ....rpc import rpc_init
from ....tw.addrs import TwAddrList
from ....tw.common import get_tw_label
class BitcoinTwAddrList(TwAddrList):
has_age = True
async def __init__(self,proto,usr_addr_list,minconf,showempty,showbtcaddrs,all_labels,wallet=None):
def check_dup_mmid(acct_labels):
mmid_prev,err = None,False
for mmid in sorted(a.mmid for a in acct_labels if a):
if mmid == mmid_prev:
err = True
msg(f'Duplicate MMGen ID ({mmid}) discovered in tracking wallet!\n')
mmid_prev = mmid
if err:
die(4,'Tracking wallet is corrupted!')
def check_addr_array_lens(acct_pairs):
err = False
for label,addrs in acct_pairs:
if not label:
continue
if len(addrs) != 1:
err = True
if len(addrs) == 0:
msg(f'Label {label!r}: has no associated address!')
else:
msg(f'{addrs!r}: more than one {proto.coin} address in account!')
if err:
die(4,'Tracking wallet is corrupted!')
self.rpc = await rpc_init(proto)
self.total = proto.coin_amt('0')
self.proto = proto
lbl_id = ('account','label')['label_api' in self.rpc.caps]
for d in await self.rpc.call('listunspent',0):
if not lbl_id in d:
continue # skip coinbase outputs with missing account
if d['confirmations'] < minconf:
continue
label = get_tw_label(proto,d[lbl_id])
if label:
lm = label.mmid
if usr_addr_list and (lm not in usr_addr_list):
continue
if lm in self:
if self[lm]['addr'] != d['address']:
die(2,'duplicate {} address ({}) for this MMGen address! ({})'.format(
proto.coin,
d['address'],
self[lm]['addr'] ))
else:
lm.confs = d['confirmations']
lm.txid = d['txid']
lm.date = None
self[lm] = {
'amt': proto.coin_amt('0'),
'lbl': label,
'addr': CoinAddr(proto,d['address']) }
amt = proto.coin_amt(d['amount'])
self[lm]['amt'] += amt
self.total += amt
# We use listaccounts only for empty addresses, as it shows false positive balances
if showempty or all_labels:
# for compatibility with old mmids, must use raw RPC rather than native data for matching
# args: minconf,watchonly, MUST use keys() so we get list, not dict
if 'label_api' in self.rpc.caps:
acct_list = await self.rpc.call('listlabels')
aa = await self.rpc.batch_call('getaddressesbylabel',[(k,) for k in acct_list])
acct_addrs = [list(a.keys()) for a in aa]
else:
acct_list = list((await self.rpc.call('listaccounts',0,True)).keys()) # raw list, no 'L'
acct_addrs = await self.rpc.batch_call('getaddressesbyaccount',[(a,) for a in acct_list]) # use raw list here
acct_labels = MMGenList([get_tw_label(proto,a) for a in acct_list])
check_dup_mmid(acct_labels)
assert len(acct_list) == len(acct_addrs),(
'listaccounts() and getaddressesbyaccount() not equal in length')
addr_pairs = list(zip(acct_labels,acct_addrs))
check_addr_array_lens(addr_pairs)
for label,addr_arr in addr_pairs:
if not label:
continue
if all_labels and not showempty and not label.comment:
continue
if usr_addr_list and (label.mmid not in usr_addr_list):
continue
if label.mmid not in self:
self[label.mmid] = { 'amt':proto.coin_amt('0'), 'lbl':label, 'addr':'' }
if showbtcaddrs:
self[label.mmid]['addr'] = CoinAddr(proto,addr_arr[0])
|
mmgen/mmgen
|
mmgen/base_proto/bitcoin/tw/addrs.py
|
Python
|
gpl-3.0
| 3,890
|
#!/usr/bin/python
import subprocess
import threading
import multiprocessing
conf_str_template = '''init_cwnd: 12
max_cwnd: 15
retx_timeout: 45e-06
queue_size: 36864
propagation_delay: 0.0000002
bandwidth: 40000000000.0
queue_type: {0}
flow_type: 2
num_flow: 100000
flow_trace: CDF_aditya.txt
cut_through: 1
mean_flow_size: 0
load_balancing: 0
preemptive_queue: 0
big_switch: 0
host_type: 1
traffic_imbalance: 0
load: 0.6
reauth_limit: 3
magic_trans_slack: 1.1
magic_delay_scheduling: 1
use_flow_trace: 0
smooth_cdf: 1
burst_at_beginning: 0
capability_timeout: 1.5
capability_resend_timeout: 9
capability_initial: 8
capability_window: 8
capability_window_timeout: 25
ddc: 0
ddc_cpu_ratio: 0.33
ddc_mem_ratio: 0.33
ddc_disk_ratio: 0.34
ddc_normalize: 2
ddc_type: 0
deadline: 0
schedule_by_deadline: 0
avg_deadline: 0.0001
capability_third_level: 1
capability_fourth_level: 0
magic_inflate: 1
interarrival_cdf: none
num_host_types: 13
permutation_tm: 1
srpt_mode: {1}
flow_split_mode: {2}
congestion_compress: {3}
pq_mode: {4}
srpt_with_fair: {5}
'''
template = './simulator 1 conf_{0}_split={1}_compres={2}.txt > result_{0}_split={1}_compres={2}.txt'
def getNumLines(trace):
out = subprocess.check_output('wc -l {}'.format(trace), shell=True)
return int(out.split()[0])
def run_exp(rw, semaphore):
semaphore.acquire()
print template.format(*rw)
subprocess.call(template.format(*rw), shell=True)
semaphore.release()
queue_types = [2, 2, 2, 2, 2, 3, 3, 3, 3, 4]
srpt_mode = [10, 20, 30,30,30, 10, 30, 0, 0, 0]
pq_mode = [0,0,0,0,0,30,30,20,10,0]
srf = [0,0,0,1,2,0,0,0,0,0]
names = ["srpt", "wrong_srpt", "lazy_srpt" , "lazy_srpt_fair", "lazy_srpt_and_wrong", "srpt_pq", "lazy_srpt_pq", "fifo", "fairness", "sergei_fairness"]
threads = []
semaphore = threading.Semaphore(multiprocessing.cpu_count())
comr_scale = 0.05
for comr in range(1, 11):
for split_mode in range(5, 6):
for i in range(0, 10):
            rv = comr_scale * comr
conf_str = conf_str_template.format(queue_types[i], srpt_mode[i], split_mode, rv, pq_mode[i], srf[i])
conf_file = "conf_{0}_split={1}_compres={2}.txt".format(names[i], split_mode, rv)
with open(conf_file, 'w') as f:
print conf_file
f.write(conf_str)
threads.append(threading.Thread(target=run_exp, args=((names[i], split_mode, rv), semaphore)))
print '\n'
print '\n'
[t.start() for t in threads]
[t.join() for t in threads]
print 'finished', len(threads), 'experiments'
|
InfocomFlowSimulator/queue_simulator
|
runner/run_experiments.py
|
Python
|
bsd-3-clause
| 2,485
|
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
from oslo_serialization import jsonutils
import six
from heat.common import context
from heat.common import exception
from heat.common.i18n import _
from heat.common import template_format
from heat.engine import attributes
from heat.engine import environment
from heat.engine import function
from heat.engine import properties
from heat.engine import resource
from heat.engine import template
LOG = logging.getLogger(__name__)
class RemoteStack(resource.Resource):
"""A Resource representing a stack.
    A resource that allows creating a stack. The stack template must be
    defined in HOT format, along with parameters (if the template has any
    parameters with no default value) and a creation timeout. After creation,
    the current stack will hold a reference to the remote stack.
"""
default_client_name = 'heat'
PROPERTIES = (
CONTEXT, TEMPLATE, TIMEOUT, PARAMETERS,
) = (
'context', 'template', 'timeout', 'parameters',
)
ATTRIBUTES = (
NAME_ATTR, OUTPUTS,
) = (
'stack_name', 'outputs',
)
_CONTEXT_KEYS = (
REGION_NAME
) = (
'region_name'
)
properties_schema = {
CONTEXT: properties.Schema(
properties.Schema.MAP,
_('Context for this stack.'),
schema={
REGION_NAME: properties.Schema(
properties.Schema.STRING,
_('Region name in which this stack will be created.'),
required=True,
)
}
),
TEMPLATE: properties.Schema(
properties.Schema.STRING,
_('Template that specifies the stack to be created as '
'a resource.'),
required=True,
update_allowed=True
),
TIMEOUT: properties.Schema(
properties.Schema.INTEGER,
_('Number of minutes to wait for this stack creation.'),
update_allowed=True
),
PARAMETERS: properties.Schema(
properties.Schema.MAP,
_('Set of parameters passed to this stack.'),
default={},
update_allowed=True
),
}
attributes_schema = {
NAME_ATTR: attributes.Schema(
_('Name of the stack.'),
type=attributes.Schema.STRING
),
OUTPUTS: attributes.Schema(
_('A dict of key-value pairs output from the stack.'),
type=attributes.Schema.MAP
),
}
def __init__(self, name, definition, stack):
super(RemoteStack, self).__init__(name, definition, stack)
self._region_name = None
self._local_context = None
def _context(self):
if self._local_context:
return self._local_context
ctx_props = self.properties.get(self.CONTEXT)
if ctx_props:
self._region_name = ctx_props[self.REGION_NAME]
else:
self._region_name = self.context.region_name
# Build RequestContext from existing one
dict_ctxt = self.context.to_dict()
dict_ctxt.update({'region_name': self._region_name,
'overwrite': False})
self._local_context = context.RequestContext.from_dict(dict_ctxt)
return self._local_context
def heat(self):
# A convenience method overriding Resource.heat()
return self._context().clients.client(self.default_client_name)
def client_plugin(self):
# A convenience method overriding Resource.client_plugin()
return self._context().clients.client_plugin(self.default_client_name)
def validate(self):
super(RemoteStack, self).validate()
try:
self.heat()
except Exception as ex:
exc_info = dict(region=self._region_name, exc=six.text_type(ex))
msg = _('Cannot establish connection to Heat endpoint at region '
'"%(region)s" due to "%(exc)s"') % exc_info
raise exception.StackValidationFailed(message=msg)
try:
params = self.properties[self.PARAMETERS]
env = environment.get_child_environment(self.stack.env, params)
tmpl = template_format.parse(self.properties[self.TEMPLATE])
args = {
'template': tmpl,
'files': self.stack.t.files,
'environment': env.user_env_as_dict(),
}
self.heat().stacks.validate(**args)
except Exception as ex:
exc_info = dict(region=self._region_name, exc=six.text_type(ex))
msg = _('Failed validating stack template using Heat endpoint at '
'region "%(region)s" due to "%(exc)s"') % exc_info
raise exception.StackValidationFailed(message=msg)
def handle_create(self):
params = self.properties[self.PARAMETERS]
env = environment.get_child_environment(self.stack.env, params)
tmpl = template_format.parse(self.properties[self.TEMPLATE])
args = {
'stack_name': self.physical_resource_name_or_FnGetRefId(),
'template': tmpl,
'timeout_mins': self.properties[self.TIMEOUT],
'disable_rollback': True,
'parameters': params,
'files': self.stack.t.files,
'environment': env.user_env_as_dict(),
}
remote_stack_id = self.heat().stacks.create(**args)['stack']['id']
self.resource_id_set(remote_stack_id)
def handle_delete(self):
if self.resource_id is not None:
with self.client_plugin().ignore_not_found:
self.heat().stacks.delete(stack_id=self.resource_id)
def handle_resume(self):
if self.resource_id is None:
raise exception.Error(_('Cannot resume %s, resource not found')
% self.name)
self.heat().actions.resume(stack_id=self.resource_id)
def handle_suspend(self):
if self.resource_id is None:
raise exception.Error(_('Cannot suspend %s, resource not found')
% self.name)
self.heat().actions.suspend(stack_id=self.resource_id)
def handle_snapshot(self):
snapshot = self.heat().stacks.snapshot(stack_id=self.resource_id)
self.data_set('snapshot_id', snapshot['id'])
def handle_restore(self, defn, restore_data):
snapshot_id = restore_data['resource_data']['snapshot_id']
snapshot = self.heat().stacks.snapshot_show(self.resource_id,
snapshot_id)
s_data = snapshot['snapshot']['data']
env = environment.Environment(s_data['environment'])
files = s_data['files']
tmpl = template.Template(s_data['template'], env=env, files=files)
props = function.resolve(self.properties.data)
props[self.TEMPLATE] = jsonutils.dumps(tmpl.t)
props[self.PARAMETERS] = env.params
return defn.freeze(properties=props)
def handle_check(self):
self.heat().actions.check(stack_id=self.resource_id)
def _needs_update(self, after, before, after_props, before_props,
prev_resource, check_init_complete=True):
# If resource is in CHECK_FAILED state, raise UpdateReplace
# to replace the failed stack.
if self.state == (self.CHECK, self.FAILED):
raise resource.UpdateReplace(self)
# Always issue an update to the remote stack and let the individual
# resources in it decide if they need updating.
return True
def handle_update(self, json_snippet, tmpl_diff, prop_diff):
# Always issue an update to the remote stack and let the individual
# resources in it decide if they need updating.
if self.resource_id:
self.properties = json_snippet.properties(self.properties_schema,
self.context)
params = self.properties[self.PARAMETERS]
env = environment.get_child_environment(self.stack.env, params)
tmpl = template_format.parse(self.properties[self.TEMPLATE])
fields = {
'stack_id': self.resource_id,
'parameters': params,
'template': tmpl,
'timeout_mins': self.properties[self.TIMEOUT],
'disable_rollback': self.stack.disable_rollback,
'files': self.stack.t.files,
'environment': env.user_env_as_dict(),
}
self.heat().stacks.update(**fields)
def _check_action_complete(self, action):
stack = self.heat().stacks.get(stack_id=self.resource_id)
if stack.action != action:
return False
if stack.status == self.IN_PROGRESS:
return False
elif stack.status == self.COMPLETE:
return True
elif stack.status == self.FAILED:
raise exception.ResourceInError(
resource_status=stack.stack_status,
status_reason=stack.stack_status_reason)
else:
            # Note: this should never happen, so it really means that
            # the resource/engine has a serious problem if it does.
raise exception.ResourceUnknownStatus(
resource_status=stack.stack_status,
status_reason=stack.stack_status_reason)
def check_create_complete(self, *args):
return self._check_action_complete(action=self.CREATE)
def check_delete_complete(self, *args):
if self.resource_id is None:
return True
try:
return self._check_action_complete(action=self.DELETE)
except Exception as ex:
self.client_plugin().ignore_not_found(ex)
return True
def check_resume_complete(self, *args):
return self._check_action_complete(action=self.RESUME)
def check_suspend_complete(self, *args):
return self._check_action_complete(action=self.SUSPEND)
def check_update_complete(self, *args):
return self._check_action_complete(action=self.UPDATE)
def check_snapshot_complete(self, *args):
return self._check_action_complete(action=self.SNAPSHOT)
def check_check_complete(self, *args):
return self._check_action_complete(action=self.CHECK)
def _resolve_attribute(self, name):
stack = self.heat().stacks.get(stack_id=self.resource_id)
if name == self.NAME_ATTR:
value = getattr(stack, name, None)
return value or self.physical_resource_name_or_FnGetRefId()
if name == self.OUTPUTS:
outputs = stack.outputs
return dict((output['output_key'], output['output_value'])
for output in outputs)
def get_reference_id(self):
return self.resource_id
def resource_mapping():
return {
'OS::Heat::Stack': RemoteStack,
}
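# --- Illustrative usage (not part of the original module) ------------------
# A hypothetical HOT fragment showing how the OS::Heat::Stack resource
# registered above is typically referenced from a template; kept here as a
# plain string purely for documentation purposes.
EXAMPLE_TEMPLATE_SNIPPET = '''
resources:
  remote_stack:
    type: OS::Heat::Stack
    properties:
      context:
        region_name: RegionTwo
      template: { get_file: remote_stack.yaml }
      timeout: 60
      parameters: {}
'''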
|
cwolferh/heat-scratch
|
heat/engine/resources/openstack/heat/remote_stack.py
|
Python
|
apache-2.0
| 11,589
|
# Copyright: (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
import json
import pytest
from ansible.modules.net_tools import nmcli
pytestmark = pytest.mark.usefixtures('patch_ansible_module')
TESTCASE_CONNECTION = [
{
'type': 'ethernet',
'conn_name': 'non_existent_nw_device',
'state': 'absent',
'_ansible_check_mode': True,
},
{
'type': 'generic',
'conn_name': 'non_existent_nw_device',
'state': 'absent',
'_ansible_check_mode': True,
},
{
'type': 'team',
'conn_name': 'non_existent_nw_device',
'state': 'absent',
'_ansible_check_mode': True,
},
{
'type': 'bond',
'conn_name': 'non_existent_nw_device',
'state': 'absent',
'_ansible_check_mode': True,
},
{
'type': 'bond-slave',
'conn_name': 'non_existent_nw_device',
'state': 'absent',
'_ansible_check_mode': True,
},
{
'type': 'bridge',
'conn_name': 'non_existent_nw_device',
'state': 'absent',
'_ansible_check_mode': True,
},
{
'type': 'vlan',
'conn_name': 'non_existent_nw_device',
'state': 'absent',
'_ansible_check_mode': True,
},
]
TESTCASE_GENERIC = [
{
'type': 'generic',
'conn_name': 'non_existent_nw_device',
'ifname': 'generic_non_existant',
'ip4': '10.10.10.10',
'gw4': '10.10.10.1',
'state': 'present',
'_ansible_check_mode': False,
},
]
TESTCASE_GENERIC_DNS4_SEARCH = [
{
'type': 'generic',
'conn_name': 'non_existent_nw_device',
'ifname': 'generic_non_existant',
'ip4': '10.10.10.10',
'gw4': '10.10.10.1',
'state': 'present',
'dns4_search': 'search.redhat.com',
'dns6_search': 'search6.redhat.com',
'_ansible_check_mode': False,
}
]
TESTCASE_BOND = [
{
'type': 'bond',
'conn_name': 'non_existent_nw_device',
'ifname': 'bond_non_existant',
'mode': 'active-backup',
'ip4': '10.10.10.10',
'gw4': '10.10.10.1',
'state': 'present',
'primary': 'non_existent_primary',
'_ansible_check_mode': False,
}
]
TESTCASE_BRIDGE = [
{
'type': 'bridge',
'conn_name': 'non_existent_nw_device',
'ifname': 'br0_non_existant',
'ip4': '10.10.10.10',
'gw4': '10.10.10.1',
'maxage': '100',
'stp': True,
'state': 'present',
'_ansible_check_mode': False,
}
]
TESTCASE_BRIDGE_SLAVE = [
{
'type': 'bridge-slave',
'conn_name': 'non_existent_nw_device',
'ifname': 'br0_non_existant',
'path_cost': 100,
'state': 'present',
'_ansible_check_mode': False,
}
]
TESTCASE_VLAN = [
{
'type': 'vlan',
'conn_name': 'non_existent_nw_device',
'ifname': 'vlan_not_exists',
'ip4': '10.10.10.10',
'gw4': '10.10.10.1',
'state': 'present',
'_ansible_check_mode': False,
}
]
TESTCASE_ETHERNET_DHCP = [
{
'type': 'ethernet',
'conn_name': 'non_existent_nw_device',
'ifname': 'ethernet_non_existant',
'ip4': '10.10.10.10',
'gw4': '10.10.10.1',
'state': 'present',
'_ansible_check_mode': False,
'dhcp_client_id': '00:11:22:AA:BB:CC:DD',
}
]
def mocker_set(mocker, connection_exists=False):
"""
Common mocker object
"""
mocker.patch('ansible.modules.net_tools.nmcli.HAVE_DBUS', True)
mocker.patch('ansible.modules.net_tools.nmcli.HAVE_NM_CLIENT', True)
get_bin_path = mocker.patch('ansible.module_utils.basic.AnsibleModule.get_bin_path')
get_bin_path.return_value = '/usr/bin/nmcli'
connection = mocker.patch.object(nmcli.Nmcli, 'connection_exists')
connection.return_value = connection_exists
return connection
@pytest.fixture
def mocked_generic_connection_create(mocker):
mocker_set(mocker)
command_result = mocker.patch.object(nmcli.Nmcli, 'execute_command')
command_result.return_value = {"rc": 100, "out": "aaa", "err": "none"}
return command_result
@pytest.fixture
def mocked_generic_connection_modify(mocker):
mocker_set(mocker, connection_exists=True)
command_result = mocker.patch.object(nmcli.Nmcli, 'execute_command')
command_result.return_value = {"rc": 100, "out": "aaa", "err": "none"}
return command_result
@pytest.fixture
def mocked_connection_exists(mocker):
connection = mocker_set(mocker, connection_exists=True)
return connection
@pytest.mark.parametrize('patch_ansible_module', TESTCASE_BOND, indirect=['patch_ansible_module'])
def test_bond_connection_create(mocked_generic_connection_create):
"""
Test : Bond connection created
"""
with pytest.raises(SystemExit):
nmcli.main()
assert nmcli.Nmcli.execute_command.call_count == 1
arg_list = nmcli.Nmcli.execute_command.call_args_list
args, kwargs = arg_list[0]
assert args[0][0] == '/usr/bin/nmcli'
assert args[0][1] == 'con'
assert args[0][2] == 'add'
assert args[0][3] == 'type'
assert args[0][4] == 'bond'
assert args[0][5] == 'con-name'
assert args[0][6] == 'non_existent_nw_device'
assert args[0][7] == 'ifname'
assert args[0][8] == 'bond_non_existant'
for param in ['ipv4.gateway', 'primary', 'autoconnect', 'mode', 'active-backup', 'ipv4.address']:
assert param in args[0]
@pytest.mark.parametrize('patch_ansible_module', TESTCASE_GENERIC, indirect=['patch_ansible_module'])
def test_generic_connection_create(mocked_generic_connection_create):
"""
Test : Generic connection created
"""
with pytest.raises(SystemExit):
nmcli.main()
assert nmcli.Nmcli.execute_command.call_count == 1
arg_list = nmcli.Nmcli.execute_command.call_args_list
args, kwargs = arg_list[0]
assert args[0][0] == '/usr/bin/nmcli'
assert args[0][1] == 'con'
assert args[0][2] == 'add'
assert args[0][3] == 'type'
assert args[0][4] == 'generic'
assert args[0][5] == 'con-name'
assert args[0][6] == 'non_existent_nw_device'
for param in ['autoconnect', 'ipv4.gateway', 'ipv4.address']:
assert param in args[0]
@pytest.mark.parametrize('patch_ansible_module', TESTCASE_GENERIC, indirect=['patch_ansible_module'])
def test_generic_connection_modify(mocked_generic_connection_modify):
"""
Test : Generic connection modify
"""
with pytest.raises(SystemExit):
nmcli.main()
assert nmcli.Nmcli.execute_command.call_count == 1
arg_list = nmcli.Nmcli.execute_command.call_args_list
args, kwargs = arg_list[0]
assert args[0][0] == '/usr/bin/nmcli'
assert args[0][1] == 'con'
assert args[0][2] == 'mod'
assert args[0][3] == 'non_existent_nw_device'
for param in ['ipv4.gateway', 'ipv4.address']:
assert param in args[0]
@pytest.mark.parametrize('patch_ansible_module', TESTCASE_GENERIC_DNS4_SEARCH, indirect=['patch_ansible_module'])
def test_generic_connection_create_dns_search(mocked_generic_connection_create):
"""
Test : Generic connection created with dns search
"""
with pytest.raises(SystemExit):
nmcli.main()
assert nmcli.Nmcli.execute_command.call_count == 1
arg_list = nmcli.Nmcli.execute_command.call_args_list
args, kwargs = arg_list[0]
assert 'ipv4.dns-search' in args[0]
assert 'ipv6.dns-search' in args[0]
@pytest.mark.parametrize('patch_ansible_module', TESTCASE_GENERIC_DNS4_SEARCH, indirect=['patch_ansible_module'])
def test_generic_connection_modify_dns_search(mocked_generic_connection_create):
"""
Test : Generic connection modified with dns search
"""
with pytest.raises(SystemExit):
nmcli.main()
assert nmcli.Nmcli.execute_command.call_count == 1
arg_list = nmcli.Nmcli.execute_command.call_args_list
args, kwargs = arg_list[0]
assert 'ipv4.dns-search' in args[0]
assert 'ipv6.dns-search' in args[0]
@pytest.mark.parametrize('patch_ansible_module', TESTCASE_CONNECTION, indirect=['patch_ansible_module'])
def test_dns4_none(mocked_connection_exists, capfd):
"""
Test if DNS4 param is None
"""
with pytest.raises(SystemExit):
nmcli.main()
out, err = capfd.readouterr()
results = json.loads(out)
assert results['changed']
@pytest.mark.parametrize('patch_ansible_module', TESTCASE_BRIDGE, indirect=['patch_ansible_module'])
def test_create_bridge(mocked_generic_connection_create):
"""
Test if Bridge created
"""
with pytest.raises(SystemExit):
nmcli.main()
assert nmcli.Nmcli.execute_command.call_count == 1
arg_list = nmcli.Nmcli.execute_command.call_args_list
args, kwargs = arg_list[0]
assert args[0][0] == '/usr/bin/nmcli'
assert args[0][1] == 'con'
assert args[0][2] == 'add'
assert args[0][3] == 'type'
assert args[0][4] == 'bridge'
assert args[0][5] == 'con-name'
assert args[0][6] == 'non_existent_nw_device'
for param in ['ip4', '10.10.10.10', 'gw4', '10.10.10.1', 'bridge.max-age', '100', 'bridge.stp', 'yes']:
assert param in args[0]
@pytest.mark.parametrize('patch_ansible_module', TESTCASE_BRIDGE, indirect=['patch_ansible_module'])
def test_mod_bridge(mocked_generic_connection_modify):
"""
Test if Bridge modified
"""
with pytest.raises(SystemExit):
nmcli.main()
assert nmcli.Nmcli.execute_command.call_count == 1
arg_list = nmcli.Nmcli.execute_command.call_args_list
args, kwargs = arg_list[0]
assert args[0][0] == '/usr/bin/nmcli'
assert args[0][1] == 'con'
assert args[0][2] == 'mod'
assert args[0][3] == 'non_existent_nw_device'
for param in ['ip4', '10.10.10.10', 'gw4', '10.10.10.1', 'bridge.max-age', '100', 'bridge.stp', 'yes']:
assert param in args[0]
@pytest.mark.parametrize('patch_ansible_module', TESTCASE_BRIDGE_SLAVE, indirect=['patch_ansible_module'])
def test_create_bridge_slave(mocked_generic_connection_create):
"""
Test if Bridge_slave created
"""
with pytest.raises(SystemExit):
nmcli.main()
assert nmcli.Nmcli.execute_command.call_count == 1
arg_list = nmcli.Nmcli.execute_command.call_args_list
args, kwargs = arg_list[0]
assert args[0][0] == '/usr/bin/nmcli'
assert args[0][1] == 'con'
assert args[0][2] == 'add'
assert args[0][3] == 'type'
assert args[0][4] == 'bridge-slave'
assert args[0][5] == 'con-name'
assert args[0][6] == 'non_existent_nw_device'
for param in ['bridge-port.path-cost', '100']:
assert param in args[0]
@pytest.mark.parametrize('patch_ansible_module', TESTCASE_BRIDGE_SLAVE, indirect=['patch_ansible_module'])
def test_mod_bridge_slave(mocked_generic_connection_modify):
"""
Test if Bridge_slave modified
"""
with pytest.raises(SystemExit):
nmcli.main()
assert nmcli.Nmcli.execute_command.call_count == 1
arg_list = nmcli.Nmcli.execute_command.call_args_list
args, kwargs = arg_list[0]
assert args[0][0] == '/usr/bin/nmcli'
assert args[0][1] == 'con'
assert args[0][2] == 'mod'
assert args[0][3] == 'non_existent_nw_device'
for param in ['bridge-port.path-cost', '100']:
assert param in args[0]
@pytest.mark.parametrize('patch_ansible_module', TESTCASE_VLAN, indirect=['patch_ansible_module'])
def test_create_vlan_con(mocked_generic_connection_create):
"""
Test if VLAN created
"""
with pytest.raises(SystemExit):
nmcli.main()
assert nmcli.Nmcli.execute_command.call_count == 1
arg_list = nmcli.Nmcli.execute_command.call_args_list
args, kwargs = arg_list[0]
for param in ['vlan']:
assert param in args[0]
@pytest.mark.parametrize('patch_ansible_module', TESTCASE_VLAN, indirect=['patch_ansible_module'])
def test_mod_vlan_conn(mocked_generic_connection_modify):
"""
Test if VLAN modified
"""
with pytest.raises(SystemExit):
nmcli.main()
assert nmcli.Nmcli.execute_command.call_count == 1
arg_list = nmcli.Nmcli.execute_command.call_args_list
args, kwargs = arg_list[0]
for param in ['vlan.id']:
assert param in args[0]
@pytest.mark.parametrize('patch_ansible_module', TESTCASE_ETHERNET_DHCP, indirect=['patch_ansible_module'])
def test_eth_dhcp_client_id_con_create(mocked_generic_connection_create):
"""
Test : Ethernet connection created with DHCP_CLIENT_ID
"""
with pytest.raises(SystemExit):
nmcli.main()
assert nmcli.Nmcli.execute_command.call_count == 1
arg_list = nmcli.Nmcli.execute_command.call_args_list
args, kwargs = arg_list[0]
assert 'ipv4.dhcp-client-id' in args[0]
|
wrouesnel/ansible
|
test/units/modules/net_tools/test_nmcli.py
|
Python
|
gpl-3.0
| 12,890
|
"""
Definition of a metaclass for immutable user-defined objects
"""
import functools
import itertools
import collections
#
# The metaclass
# =============
#
class ImmutableClass(type):
"""Immutable class metaclass derived from tuple"""
def __new__(mcs, name, bases, nmspc, default_attr=lambda _: None):
"""Generates a new type instance for the immutable class"""
# Make a shallow copy of the original namespace. The new copy can be
# used for the new immutable class, while the original copy is going to
# be for the proxy class.
new_nmspc = dict(nmspc)
# Fields determination
# --------------------
fields, defining_count = _determine_fields(bases, new_nmspc)
# Update the corresponding fields in the name space
new_nmspc['__fields__'] = fields
new_nmspc['__slots__'] = () # prevent new attribute assignment
new_nmspc['__defining_count__'] = defining_count
# Initializer definition
# ----------------------
# Prepare the proxy class in initialization function
ProxyClass = _generate_proxy_class(
name, bases, nmspc
)
new_nmspc['__Proxy_Class__'] = ProxyClass
# Define the new method
@functools.wraps(ProxyClass.__init__)
def new_meth(cls, *args, **kwargs):
"""Set a new object of the immutable class"""
# Initialize the proxy by user initializer
proxy = ProxyClass(*args, **kwargs)
# Make the actual immutable object from the proxy object
return tuple.__new__(
cls, (getattr(proxy, i, default_attr(i)) for i in fields)
)
# Register the new method
new_nmspc['__new__'] = new_meth
new_nmspc['__init__'] = _decorate_immutable_init(ProxyClass.__init__)
# Utility methods
# ---------------
# Attribute access
#
# Generate a dictionary here and let the actual method access it in the
# closure. Going to be used by several methods.
attr_idxes = {
name: idx for idx, name in enumerate(fields)
}
def getattr_meth(self, attr):
"""Gets the attribute of the given name"""
            try:
                return self[attr_idxes[attr]]
            except KeyError:
                # __getattr__ must raise AttributeError so that hasattr()
                # and getattr() with a default behave correctly
                raise AttributeError(
                    'Invalid attribute %s' % attr
                )
# Register the attribute getter
new_nmspc['__getattr__'] = getattr_meth
# Update method
def update_meth(self, **kwargs):
"""Updates defining attributes"""
result = self.__class__(
*(map(kwargs.pop, fields[0:defining_count], self))
)
if kwargs:
raise ValueError(
'Got unexpected field names %r' % list(kwargs)
)
return result
new_nmspc['_update'] = update_meth
# Replace method
def replace_meth(self, **kwargs):
"""Simply replace a field in the object"""
result = tuple.__new__(
self.__class__,
map(kwargs.pop, fields, self)
)
if kwargs:
raise ValueError(
'Got unexpected field names %r' % list(kwargs)
)
return result
new_nmspc['_replace'] = replace_meth
# repr method
def repr_meth(self):
"""Returns the nicely formmatted string"""
return self.__class__.__name__ + '(' + (
', '.join(
'%s=%r' % (i, j)
for i, j in zip(fields[0:defining_count], self)
)
) + ')'
if '__repr__' not in new_nmspc:
new_nmspc['__repr__'] = repr_meth
if '__str__' not in new_nmspc:
new_nmspc['__str__'] = repr_meth
# hash method
if '__hash__' not in new_nmspc:
new_nmspc['__hash__'] = lambda x: hash(
(id(x.__class__), ) + x[0:defining_count]
)
# Better error for attempts to mutate
def setattr_meth(self, attr, value):
"""Raises Attribute Error for attempts to mutate"""
raise AttributeError(
'Cannot mutate attributes of immutable objects'
)
new_nmspc['__setattr__'] = setattr_meth
# Dictionary returning
def asdict_meth(self, full=False, ordered=False):
"""Returns an dictionary which maps field names to values
:param bool full: If the data fields are going to be contained as
well, by default only the defining fields are contained.
:param bool ordered: If OrderedDict or plain dictionary is going to
be used for holding the return value. By default a plain
dictionary is going to be used.
"""
included_fields = fields if full else fields[0:defining_count]
container = collections.OrderedDict if ordered else dict
return container(
zip(included_fields, self)
)
new_nmspc['_asdict'] = asdict_meth
# Pickling support
# ----------------
new_nmspc['__getnewargs__'] = lambda self: self[0:defining_count]
new_nmspc['__getstate__'] = lambda self: False
new_nmspc['__setstate__'] = lambda self, state: False
# Class generation
# ----------------
#
# Set the base to tuple for the bottom classes in inheritance trees
if len(bases) == 0:
bases = (tuple, )
# Return the new class
return type.__new__(mcs, name, bases, new_nmspc)
def __init__(cls, *args, **kwargs):
super().__init__(*args)
#
# The utility functions
# =====================
#
def _get_argnames(func):
"""Gets the names of the argument of a function"""
return func.__code__.co_varnames[0:func.__code__.co_argcount]
def _determine_fields(bases, nmspc):
"""Determines the required fields for the new immutable classs
:param tuple bases: The base classes of the new class
:param dict nmspc: The name space dictionary for the new class
:returns: The tuple of all the fields of the new class, and the number of
defining fields.
"""
# Get all the data fields
#
    # It needs to contain all the fields that the base classes have got.
# Fields that are still defining fields are going to be removed later.
fields = set()
for base in bases:
if isinstance(base, ImmutableClass):
fields.update(base.__fields__)
else:
raise TypeError(
'Type %s is not an immutable class' % base
)
# The new data fields that is added for this class
if '__fields__' in nmspc:
fields.update(nmspc['__fields__'])
# Get all the defining fields
try:
init_meth = nmspc['__init__']
init_argnames = _get_argnames(init_meth)
except KeyError:
raise ValueError('Initializer needs to be explicitly given.')
except AttributeError:
raise ValueError('Initializer needs to be a function.')
defining_fields = init_argnames[1:]
defining_count = len(defining_fields)
# Assemble the list of all fields
#
    # Remove the defining fields that are already added as data fields from
# base classes.
data_fields = tuple(fields.difference(defining_fields))
# Assemble the tuple of all fields
fields = defining_fields + data_fields
return fields, defining_count
def _decorate_proxy_init(init):
"""Decorates the initialization function to be used for proxy class
After the decoration, all the arguments will be assigned as attributes of
``self`` before the invocation of the actual initializer.
"""
@functools.wraps(init)
def decorated(self, *args, **kwargs):
"""The decorated initializer"""
argnames = _get_argnames(init)
for field, value in itertools.chain(
zip(argnames[1:], args),
kwargs.items()
):
setattr(self, field, value)
init(self, *args, **kwargs)
return decorated
def _decorate_immutable_init(proxy_init):
"""Decorate the initialization function to be used for the immutable class
To facilitate the calling of the initialization function of the base class,
the initialization function is still put in the actual immutable class,
although its actual function is already moved into the new method. In order
to avoid it trying to taint the immutate object and causing error, this
function can be used for wrapping an proxy initialization function into an
function that can be safely set as the initializer for the immutable class
without problem.
:param proxy_init: The initializer for the proxy class
:returns: The decorated initializer that can be set for immutable classes
"""
@functools.wraps(proxy_init)
    def decorated(self, *args, **kwargs):
"""The decorated initializer"""
if isinstance(self.__class__, ImmutableClass):
pass
else:
return proxy_init(self, *args, **kwargs)
    return decorated
def _generate_proxy_class(name, bases, orig_nmspc):
"""Generate a initialize proxy class for the immutable class
The generated proxy class will have got all the behaviour of the new class
and itsbase classes. Just it is a regular mutable class. Its instances can
be used in the invocation of the initializer and act as the ``self``. Then
the actual defining and data fields can be read from the proxy and set in
the actual immutable class instance.
:param str name: The name of the new immutable class
:param tuple bases: The basis of the new immutable class
:param orig_nmspc: The name space dictionary of the immutable class before
        any tweaking by this metaclass.
"""
# The proxy class for each immutable class will be stored in the
# __Proxy_Class__ attribute by convention.
proxy_bases = tuple(i.__Proxy_Class__ for i in bases)
ProxyClass = type(
'%sProxyClass' % name,
proxy_bases if len(proxy_bases) >= 1 else (object, ),
orig_nmspc
)
# Decorated initializer will set all the defining fields
proxy_init_meth = _decorate_proxy_init(
getattr(ProxyClass, '__init__')
)
setattr(ProxyClass, '__init__', proxy_init_meth)
def proxy_super_meth(self):
"""Returns the super class of the Proxy class"""
return super(ProxyClass, self)
setattr(ProxyClass, 'super', proxy_super_meth)
return ProxyClass
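# --- Usage sketch (not part of the original module) -------------------------
# A minimal immutable class built with the metaclass above. The initializer
# arguments become the defining fields; instances behave like named tuples.
if __name__ == '__main__':
    class Point(metaclass=ImmutableClass):
        def __init__(self, x, y):
            pass  # attribute assignment is handled by the proxy initializer

    p = Point(1, 2)
    print(p)                  # Point(x=1, y=2)
    print(p.x + p.y)          # 3
    print(p._replace(x=5))    # Point(x=5, y=2)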
|
tschijnmo/immutableclass
|
immutableclass.py
|
Python
|
mit
| 10,899
|
#!/usr/bin/env python
from telegram import TelegramObject
class ForceReply(TelegramObject):
def __init__(self,
force_reply=True,
selective=None):
self.force_reply = force_reply
self.selective = selective
@staticmethod
def de_json(data):
return ForceReply(force_reply=data.get('force_reply', None),
selective=data.get('selective', None))
def to_dict(self):
data = {'force_reply': self.force_reply}
if self.selective:
data['selective'] = self.selective
return data
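# --- Usage sketch (not part of the original module) -------------------------
# Round-tripping a ForceReply object through its dict representation.
if __name__ == '__main__':
    fr = ForceReply(selective=True)
    data = fr.to_dict()   # {'force_reply': True, 'selective': True}
    assert ForceReply.de_json(data).to_dict() == data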
|
yosit/kinnernet_bot
|
lib/telegram/forcereply.py
|
Python
|
apache-2.0
| 628
|
"""
"""
from docker import DockerCompose
from common import Directories, ConfigManager
import util
import time
import logging
logger = logging.getLogger(__name__)
class WebService:
SERVICE_NAME = "webapp"
def __init__(self, context):
self.context = context
self.config = self.context.manager.config
# ----------
# Client API
# ----------
def inject_cmfive_config_file(self, db_hostname):
logger.info("inject config.php into web container")
# add or override db_hostname config
tokens = dict(self.config)
tokens.update({"db_instance_endpoint": db_hostname})
# render template into stage dir
util.inflate_template(
self.context.dirs.cmfive.joinpath("config.php.template"),
self.context.dirs.stage,
".template",
tokens,
False
)
# copy file into container(s)
for container in DockerCompose.containers_by_service(self.SERVICE_NAME):
container.copy_into(
source=self.context.dirs.stage.joinpath("config.php"),
target="/var/www/html/config.php"
)
def install_test_packages(self):
logger.info("install test packages")
self.run("sh test/.install/install.sh")
def install_core(self):
logger.info("install cmfive core")
self.run(f"php cmfive.php install core {self.config['cmfive_core_ref']}")
def seed_encryption(self):
logger.info("seed encryption key")
self.run("php cmfive.php seed encryption")
def install_migration(self):
logger.info("perform module database migrations")
self.run("php cmfive.php install migration")
def seed_admin(self):
logger.info("seed cmfive admin user")
self.run("php cmfive.php seed admin '{}' '{}' '{}' '{}' '{}'".format(
self.config['admin_first_name'],
self.config['admin_last_name'],
self.config['admin_email'],
self.config['admin_login_username'],
self.config['admin_login_password']
))
def update_permissions(self):
logger.info("update container permissions")
self.run("chmod 777 -R cache storage uploads")
@staticmethod
def snapshot_container(tag):
container = WebService.container_by_index(0)
util.run(f"docker commit {container.container_name} {tag}")
# ---------------
# Private Methods
# ---------------
def run(self, command):
"""run command against web service container(s)"""
for container in DockerCompose.containers_by_service(self.SERVICE_NAME):
container.run_command(command)
@staticmethod
def container_by_index(index):
containers = list(DockerCompose.containers_by_service(WebService.SERVICE_NAME))
return containers[index]
|
2pisoftware/cmfive-boilerplate
|
.build/setup/service/web.py
|
Python
|
gpl-3.0
| 2,888
|
import logging
from base import ActionRunner, ActionManager
from collections import namedtuple
from gevent.pool import Group
from gevent import sleep
from robotActionController.Data.storage import StorageFactory
from robotActionController.Data.Model import Action
class GroupRunner(ActionRunner):
supportedClass = 'GroupAction'
Runable = namedtuple('GroupAction', ActionRunner.Runable._fields + ('actions',))
def __init__(self, group, robot, *args, **kwargs):
super(GroupRunner, self).__init__(group)
self._robot = robot
self._handle = None
def _runInternal(self, action):
manager = ActionManager.getManager(self._robot)
handles = [manager.executeActionAsync(a) for a in action.actions]
self._handle = Group([h for h in handles if h])
self.waitForComplete()
        self._output.extend([o for h in handles if h for o in h.output])
return all([h.value for h in handles if h])
@staticmethod
def getRunable(action):
if type(action) == dict and action.get('type', None) == GroupRunner.supportedClass:
actionCopy = dict(action)
actions = actionCopy['actions']
actionCopy['actions'] = []
for groupAction in actions:
action = None
if 'action' not in groupAction:
id_ = groupAction.get('action_id', None) or groupAction.get('id', None)
if id_:
session = StorageFactory.getNewSession()
action = ActionRunner.getRunable(session.query(Action).get(id_))
session.close()
else:
action = ActionRunner.getRunable(groupAction['action'])
actionCopy['actions'].append(action)
return GroupRunner.Runable(actionCopy['name'],
actionCopy.get('id', None),
actionCopy['type'],
actionCopy['actions'])
elif action.type == GroupRunner.supportedClass:
actions = [ActionRunner.getRunable(a) for a in action.actions]
return GroupRunner.Runable(action.name, action.id, action.type, actions)
else:
logger = logging.getLogger(GroupRunner.__name__)
logger.error("Action: %s has an unknown action type: %s" % (action.name, action.type))
return None
def isValid(self, group):
valid = True
for action in group.actions:
valid = valid & ActionRunner(self._robot).isValid(action)
if not valid:
break
def waitForComplete(self):
if self._handle:
self._handle.join()
|
scheunemann/robotActionController
|
robotActionController/ActionRunner/groupRunner.py
|
Python
|
mit
| 2,761
|
#!/usr/bin/env python3.7
#-----------------------------------------------------------------------------
#
# 2019-03-17
#-----------------------------------------------------------------------------
import unittest
from unittest.mock import patch, Mock
import json
class TestRdapC(unittest.TestCase):
def setUp(self):
try:
from minirdapc.rdap_client import rdap_client
self.rdapc = rdap_client("https://rdap.lacnic.net/rdap")
self.r = True
except:
self.r = False
raise
# end setup
#
def tearDown(self):
# self.rdapc.rdap_cache.close()
pass
# end tearDown
def test_start(self):
self.assertTrue(self.r)
# end test
def test_http_get(self):
res = self.rdapc.rdap_http_get("/ip/200.7.84.1")
self.assertTrue(res['rdapConformance'][0] == 'rdap_level_0')
# end test
#
def test_rdap_query_ip_single(self):
res = self.rdapc.rdap_query("ip", "200.7.84.1")
self.assertTrue(res['rdapConformance'][0] == 'rdap_level_0')
self.assertTrue(res['objectClassName'] == 'ip network')
# end
#
def test_rdap_query_ip_network(self):
res = self.rdapc.rdap_query("ip", "200.7.84.0/24")
self.assertTrue(res['rdapConformance'][0] == 'rdap_level_0')
self.assertTrue(res['objectClassName'] == 'ip network')
# end
#
def test_rdap_query_autnum(self):
res = self.rdapc.rdap_query("autnum", "28001")
self.assertTrue(res['rdapConformance'][0] == 'rdap_level_0')
self.assertTrue(res['objectClassName'] == 'autnum')
# end
#
def test_rdap_query_entity(self):
res = self.rdapc.rdap_query("entity", "UY-LACN-LACNIC")
self.assertTrue(res['rdapConformance'][0] == 'rdap_level_0')
self.assertTrue(res['objectClassName'] == 'entity')
# end
#
def test_pyjq_query_response(self):
res = self.rdapc.rdap_query("ip", "200.7.84.0/24")
object_class_name = self.rdapc._pyjq('.objectClassName', res)
self.assertTrue(object_class_name == 'ip network')
# end
#
def test_pyjq_query_response_implicit_json(self):
res = self.rdapc.rdap_query("ip", "200.7.84.0/24")
object_class_name = self.rdapc._pyjq('.objectClassName')
self.assertTrue(object_class_name == 'ip network')
# end
#
def test_get_poc_simple(self):
res = self.rdapc.rdap_query("ip", "200.7.84.0/24")
poc = self.rdapc.get_poc('abuse', 0)
self.assertTrue(poc == "ABL2", msg = "poc: {}".format(poc) )
poc = self.rdapc.get_poc('technical', 0)
self.assertTrue(poc == "AIL", msg = "poc: {}".format(poc) )
# end
#
def test_get_poc_deep(self):
res = self.rdapc.rdap_query("ip", "200.7.84.0/24")
poc = self.rdapc.get_poc('technical', 1)
self.assertTrue(poc['handle'] == "AIL", msg = "poc: {}".format(poc) )
self.assertTrue(poc['email'] == "ipadmin@lacnic.net", msg = "poc: {}".format(poc) )
# end
def test_get_poc_deep2(self):
res = self.rdapc.rdap_query("ip", "200.7.84.0/24")
poc = self.rdapc.get_poc('abuse', 1)
self.assertTrue(poc['handle'] == "ABL2", msg = "poc: {}".format(poc) )
self.assertTrue(poc['email'] == "ipabuse@lacnic.net", msg = "poc: {}".format(poc) )
# end
# end class TestRdapC
if __name__ == '__main__':
print("TESTING minirdapc - (c) carlos@xt6.us, March 2019\n")
unittest.main()
#-----------------------------------------------------------------------------
|
carlosm3011/cm2c-skunkworks
|
graduated/30-minirdapc/test.py
|
Python
|
bsd-2-clause
| 3,605
|
"""
Radiative transfer solvers for the atmosphere model.
Modules:
pydisort - Python implementation of CDISORT
    twostream - Fast numerical solver for heterogeneous layers
"""
from . import twostream
from . import pydisort
__all__ = ['twostream','pydisort']
|
adamkovics/atmosphere
|
atmosphere/rt/__init__.py
|
Python
|
gpl-2.0
| 261
|
from .struct import Struct
from .types import Int16, Int32, String, Schema
class RequestHeader(Struct):
SCHEMA = Schema(
('api_key', Int16),
('api_version', Int16),
('correlation_id', Int32),
('client_id', String('utf-8'))
)
def __init__(self, request, correlation_id=0, client_id='kafka-python'):
super(RequestHeader, self).__init__(
request.API_KEY, request.API_VERSION, correlation_id, client_id
)
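# Minimal usage sketch (illustrative only): any object exposing API_KEY and
# API_VERSION attributes can serve as the `request` argument, and the Struct
# base class builds an encode() from SCHEMA. `FakeRequest` is hypothetical.
#
#   class FakeRequest(object):
#       API_KEY = 3       # made-up API key
#       API_VERSION = 0
#
#   header = RequestHeader(FakeRequest, correlation_id=1, client_id='demo')
#   wire_bytes = header.encode()  # int16, int16, int32, length-prefixed string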
|
zackdever/kafka-python
|
kafka/protocol/api.py
|
Python
|
apache-2.0
| 476
|
"""JSON implementations of relationship searches."""
# pylint: disable=no-init
# Numerous classes don't require __init__.
# pylint: disable=too-many-public-methods,too-few-public-methods
# Number of methods are defined in specification
# pylint: disable=protected-access
# Access to protected methods allowed in package json package scope
# pylint: disable=too-many-ancestors
# Inheritance defined in specification
from . import objects
from . import queries
from .. import utilities
from ..osid import searches as osid_searches
from ..primitives import Id
from ..utilities import get_registry
from dlkit.abstract_osid.osid import errors
from dlkit.abstract_osid.relationship import searches as abc_relationship_searches
class RelationshipSearch(abc_relationship_searches.RelationshipSearch, osid_searches.OsidSearch):
"""The search interface for governing relationship searches."""
def __init__(self, runtime):
self._namespace = 'relationship.Relationship'
self._runtime = runtime
record_type_data_sets = get_registry('RESOURCE_RECORD_TYPES', runtime)
self._record_type_data_sets = record_type_data_sets
self._all_supported_record_type_data_sets = record_type_data_sets
self._all_supported_record_type_ids = []
self._id_list = None
for data_set in record_type_data_sets:
self._all_supported_record_type_ids.append(str(Id(**record_type_data_sets[data_set])))
osid_searches.OsidSearch.__init__(self, runtime)
@utilities.arguments_not_none
def search_among_relationships(self, relationship_ids):
"""Execute this search among the given list of relationships.
arg: relationship_ids (osid.id.IdList): list of relationships
raise: NullArgument - ``relationship_ids`` is ``null``
*compliance: mandatory -- This method must be implemented.*
"""
self._id_list = relationship_ids
@utilities.arguments_not_none
def order_relationship_results(self, relationship_search_order):
"""Specify an ordering to the search results.
arg: relationship_search_order
(osid.relationship.RelationshipSearchOrder):
relationship search order
raise: NullArgument - ``relationship_search_order`` is ``null``
raise: Unsupported - ``relationship_search_order`` is not of
this service
*compliance: mandatory -- This method must be implemented.*
"""
raise errors.Unimplemented()
@utilities.arguments_not_none
def get_relationship_search_record(self, relationship_search_record_type):
"""Gets the relationship search record corresponding to the given relationship search record ``Type``.
This method is used to retrieve an object implementing the
requested record.
arg: relationship_search_record_type (osid.type.Type): a
relationship search record type
return: (osid.relationship.records.RelationshipSearchRecord) -
the relationship search record
raise: NullArgument - ``relationship_search_record_type`` is
``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure occurred
raise: Unsupported -
``has_record_type(relationship_search_record_type)`` is
``false``
*compliance: mandatory -- This method must be implemented.*
"""
raise errors.Unimplemented()
class RelationshipSearchResults(abc_relationship_searches.RelationshipSearchResults, osid_searches.OsidSearchResults):
"""This interface provides a means to capture results of a search."""
def __init__(self, results, query_terms, runtime):
# if you don't iterate, then .count() on the cursor is an inaccurate representation of limit / skip
# self._results = [r for r in results]
self._namespace = 'relationship.Relationship'
self._results = results
self._query_terms = query_terms
self._runtime = runtime
self.retrieved = False
def get_relationships(self):
"""Gets the relationship list resulting from a search.
return: (osid.relationship.RelationshipList) - the relationship
list
raise: IllegalState - list already retrieved
*compliance: mandatory -- This method must be implemented.*
"""
if self.retrieved:
raise errors.IllegalState('List has already been retrieved.')
self.retrieved = True
return objects.RelationshipList(self._results, runtime=self._runtime)
relationships = property(fget=get_relationships)
def get_relationship_query_inspector(self):
"""Gets the inspector for the query to examine the terms used in the search.
return: (osid.relationship.RelationshipQueryInspector) - the
relationship query inspector
*compliance: mandatory -- This method must be implemented.*
"""
return queries.RelationshipQueryInspector(self._query_terms, runtime=self._runtime)
relationship_query_inspector = property(fget=get_relationship_query_inspector)
@utilities.arguments_not_none
def get_relationship_search_results_record(self, relationship_search_record_type):
"""Gets the relationship search results record corresponding to the given relationship search record ``Type``.
This method must be used to retrieve an object implementing the
requested record interface along with all of its ancestor
interfaces.
arg: relationship_search_record_type (osid.type.Type): a
relationship search record type
        return: (osid.relationship.records.RelationshipSearchResultsRecord) -
                the relationship search results record
raise: NullArgument - ``relationship_search_record_type`` is
``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure occurred
raise: Unsupported -
``has_record_type(relationship_search_record_type)`` is
``false``
*compliance: mandatory -- This method must be implemented.*
"""
raise errors.Unimplemented()
class FamilySearch(abc_relationship_searches.FamilySearch, osid_searches.OsidSearch):
"""The search interface for governing family searches."""
def __init__(self, runtime):
self._namespace = 'relationship.Family'
self._runtime = runtime
record_type_data_sets = get_registry('RESOURCE_RECORD_TYPES', runtime)
self._record_type_data_sets = record_type_data_sets
self._all_supported_record_type_data_sets = record_type_data_sets
self._all_supported_record_type_ids = []
self._id_list = None
for data_set in record_type_data_sets:
self._all_supported_record_type_ids.append(str(Id(**record_type_data_sets[data_set])))
osid_searches.OsidSearch.__init__(self, runtime)
@utilities.arguments_not_none
def search_among_families(self, family_ids):
"""Execute this search among the given list of families.
arg: family_ids (osid.id.IdList): list of families
raise: NullArgument - ``family_ids`` is ``null``
*compliance: mandatory -- This method must be implemented.*
"""
self._id_list = family_ids
@utilities.arguments_not_none
def order_family_results(self, family_search_order):
"""Specify an ordering to the search results.
arg: family_search_order
(osid.relationship.FamilySearchOrder): family search
order
raise: NullArgument - ``family_search_order`` is ``null``
raise: Unsupported - ``family_search_order`` is not of this
service
*compliance: mandatory -- This method must be implemented.*
"""
raise errors.Unimplemented()
@utilities.arguments_not_none
def get_family_search_record(self, family_search_record_type):
"""Gets the family search record corresponding to the given family search record ``Type``.
This method is used to retrieve an object implementing the
requested record.
arg: family_search_record_type (osid.type.Type): a family
search record type
return: (osid.relationship.records.FamilySearchRecord) - the
family search record
raise: NullArgument - ``family_search_record_type`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure occurred
raise: Unsupported -
``has_record_type(family_search_record_type)`` is
``false``
*compliance: mandatory -- This method must be implemented.*
"""
raise errors.Unimplemented()
class FamilySearchResults(abc_relationship_searches.FamilySearchResults, osid_searches.OsidSearchResults):
"""This interface provides a means to capture results of a search and is used as a vehicle to perform a search within a previous result set."""
def __init__(self, results, query_terms, runtime):
# if you don't iterate, then .count() on the cursor is an inaccurate representation of limit / skip
# self._results = [r for r in results]
self._namespace = 'relationship.Family'
self._results = results
self._query_terms = query_terms
self._runtime = runtime
self.retrieved = False
def get_families(self):
"""Gets the family list resulting from a search.
return: (osid.relationship.FamilyList) - the family list
raise: IllegalState - list already retrieved
*compliance: mandatory -- This method must be implemented.*
"""
if self.retrieved:
raise errors.IllegalState('List has already been retrieved.')
self.retrieved = True
return objects.FamilyList(self._results, runtime=self._runtime)
families = property(fget=get_families)
def get_family_query_inspector(self):
"""Gets the inspector for the query to examine the terms used in the search.
return: (osid.relationship.FamilyQueryInspector) - the family
query inspector
*compliance: mandatory -- This method must be implemented.*
"""
return queries.FamilyQueryInspector(self._query_terms, runtime=self._runtime)
family_query_inspector = property(fget=get_family_query_inspector)
@utilities.arguments_not_none
def get_family_search_results_record(self, family_search_record_type):
"""Gets the family search results record corresponding to the given family search record Type.
This method is used to retrieve an object implementing the
requested record.
arg: family_search_record_type (osid.type.Type): a family
search record type
return: (osid.relationship.records.FamilySearchResultsRecord) -
the family search results record
raise: NullArgument - ``FamilySearchRecordType`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure occurred
raise: Unsupported -
``has_record_type(family_search_record_type)`` is
``false``
*compliance: mandatory -- This method must be implemented.*
"""
raise errors.Unimplemented()
|
mitsei/dlkit
|
dlkit/json_/relationship/searches.py
|
Python
|
mit
| 11,651
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class ImageCreateResult(Model):
"""ImageCreateResult.
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar source_url:
:vartype source_url: str
:ivar status: Possible values include: 'OK', 'OKDuplicate', 'ErrorSource',
'ErrorImageFormat', 'ErrorImageSize', 'ErrorStorage', 'ErrorLimitExceed',
'ErrorTagLimitExceed', 'ErrorUnknown'
:vartype status: str or
~azure.cognitiveservices.vision.customvision.training.models.enum
:ivar image:
:vartype image:
~azure.cognitiveservices.vision.customvision.training.models.Image
"""
_validation = {
'source_url': {'readonly': True},
'status': {'readonly': True},
'image': {'readonly': True},
}
_attribute_map = {
'source_url': {'key': 'SourceUrl', 'type': 'str'},
'status': {'key': 'Status', 'type': 'str'},
'image': {'key': 'Image', 'type': 'Image'},
}
def __init__(self):
super(ImageCreateResult, self).__init__()
self.source_url = None
self.status = None
self.image = None
|
lmazuel/azure-sdk-for-python
|
azure-cognitiveservices-vision-customvision/azure/cognitiveservices/vision/customvision/training/models/image_create_result.py
|
Python
|
mit
| 1,635
|
# -*- coding: utf-8 -*-
from scrapy import Request
from scrapy.exceptions import CloseSpider
from scrapy.loader import ItemLoader
from rojak_pantau.spiders.base import BaseSpider
from datetime import datetime
from rojak_pantau.items import News
from rojak_pantau.util.wib_to_utc import wib_to_utc
from rojak_pantau.i18n import _
class SindonewsSpider(BaseSpider):
name="sindonews"
allowed_domains=["sindonews.com"]
start_urls=(
'http://metro.sindonews.com/topic/7282/pilgub-dki/',
)
def parse(self, response):
self.logger.info('parse: {}'.format(response))
is_no_update = False
for article in response.css('li > div.breaking-title'):
# http://metro.sindonews.com/read/1146316/171/penyidik-bareskrim-mulai-dalami-video-dugaan-penistaan-agama-1476179831
url_selectors = article.css('a::attr(href)')
if not url_selectors:
raise CloseSpider('url_selectors not found')
url = url_selectors.extract()[0]
# Example 'Kamis, 13 Oktober 2016 - 11:18 WIB'
date_time_str_selectors = article.css('p::text')
if not date_time_str_selectors:
raise CloseSpider('date_time_str_selectors not found')
date_time_str = date_time_str_selectors.extract()[0]
# Parse date information
# Example '13 Oktober 2016 - 11:18'
date_time_str = date_time_str.split(',')[1].strip()[:-4]
date_time_str = ' '.join([_(w) for w in date_time_str.split(' ')])
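            # e.g. 'Kamis, 13 Oktober 2016 - 11:18 WIB' -> '13 Oktober 2016 - 11:18',
            # and, assuming _() maps Indonesian month names to English,
            # -> '13 October 2016 - 11:18', which matches '%d %B %Y - %H:%M' below.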
try:
published_at_wib = datetime.strptime(date_time_str, '%d %B %Y - %H:%M')
except Exception as e:
raise CloseSpider('cannot_parse_date: %s' % e)
published_at = wib_to_utc(published_at_wib)
if self.media['last_scraped_at'] >= published_at:
is_no_update = True
break
# For each url we create new scrapy request
yield Request(url, callback=self.parse_news)
if is_no_update:
self.logger.info('Media have no update')
return
for next_button in response.css('.mpaging > ul > li'):
if len(next_button.css('a:not(.active) > .fa-angle-right')) > 0:
next_page = next_button.css('a::attr(href)').extract()[0]
next_page_url = response.urljoin(next_page)
yield Request(next_page_url, callback=self.parse)
break
# Collect news item
def parse_news(self, response):
self.logger.info('parse_news: %s' % response)
# Initialize item loader
# extract news title, published_at, author, content, url
loader = ItemLoader(item=News(), response=response)
loader.add_value('url', response.url)
title_selectors = response.css('h1[itemprop="headline"]::text')
if not title_selectors:
# Will be dropped on the item pipeline
return loader.load_item()
title = title_selectors.extract()[0]
loader.add_value('title', title)
author_name_selectors = response.css('a[rel="author"] > span::text')
if not author_name_selectors:
loader.add_value('author_name', '')
else:
author_name = author_name_selectors.extract()[0]
loader.add_value('author_name', author_name)
raw_content_selectors = response.css('.content')
if not raw_content_selectors:
# Will be dropped on the item pipeline
return loader.load_item()
raw_content = raw_content_selectors.extract()
raw_content = ' '.join([w.strip() for w in raw_content])
raw_content = raw_content.strip()
loader.add_value('raw_content', raw_content)
date_time_str_selectors = response.css('article > div.time::text')
if not date_time_str_selectors:
# Will be dropped on the item pipeline
return loader.load_item()
# Parse date information
# Example: Selasa, 6 Oktober 2015 - 05:23 WIB
date_time_str = date_time_str_selectors.extract()[0]
date_time_str = date_time_str.split(',')[1].strip()[:-4]
date_time_str = ' '.join([_(w) for w in date_time_str.split(' ')])
try:
published_at_wib = datetime.strptime(date_time_str, '%d %B %Y - %H:%M')
except ValueError:
# Will be dropped on the item pipeline
return loader.load_item()
published_at = wib_to_utc(published_at_wib)
loader.add_value('published_at', published_at)
# Move scraped news to pipeline
return loader.load_item()
|
CodeRiderz/rojak
|
rojak-pantau/rojak_pantau/spiders/sindonews.py
|
Python
|
bsd-3-clause
| 4,716
|
template_string = '''#!/bin/bash
#PBS -S /bin/bash
#PBS -N ${jobname}
#PBS -m n
#PBS -l walltime=$walltime
#PBS -l nodes=${nodes_per_block}:ppn=${tasks_per_node}
#PBS -o ${submit_script_dir}/${jobname}.submit.stdout
#PBS -e ${submit_script_dir}/${jobname}.submit.stderr
${scheduler_options}
${worker_init}
export JOBNAME="${jobname}"
${user_script}
'''
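# Illustrative fill-in of the template above using string.Template (a sketch
# only; the surrounding provider code performs the real substitution and every
# value below is made up):
#
#   from string import Template
#   script = Template(template_string).substitute(
#       jobname='parsl_job', walltime='00:30:00', nodes_per_block=1,
#       tasks_per_node=4, submit_script_dir='/tmp',
#       scheduler_options='', worker_init='', user_script='echo hello')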
|
swift-lang/swift-e-lab
|
parsl/providers/torque/template.py
|
Python
|
apache-2.0
| 358
|
'''
Problem 21
05 July 2002
Let d(n) be defined as the sum of proper divisors of n (numbers less
than n which divide evenly into n).
If d(a) = b and d(b) = a, where a != b, then a and b are an amicable
pair and each of a and b are called amicable numbers.
For example, the proper divisors of 220 are 1, 2, 4, 5, 10, 11, 20, 22,
44, 55 and 110; therefore d(220) = 284. The proper divisors of 284 are 1,
2, 4, 71 and 142; so d(284) = 220.
Evaluate the sum of all the amicable numbers under 10000.
----------------------------------------------------------
Created on 04.02.2012
@author: ahallmann
'''
import unittest
import timeit
def generate_proper_divisors(n):
for x in range(1, int(n/2)+1):
if n % x == 0:
yield x
def d(n):
return sum(generate_proper_divisors(n))
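# e.g. d(220) == 284 and d(284) == 220, so 220 and 284 form an amicable pair.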
def solve(limit):
amicable_numbers = []
for a in range(1, limit):
b = d(a)
if a != b and d(b) == a:
amicable_numbers.append(a)
return amicable_numbers
class Test(unittest.TestCase):
def test_sample(self):
self.assertEqual(284, d(220))
self.assertEqual(220, d(284))
def test_answer(self):
amicable_numbers = solve(10000)
self.assertEqual([220, 284, 1184, 1210, 2620, 2924, 5020, 5564, 6232, 6368], amicable_numbers)
self.assertEqual(31626, sum(amicable_numbers))
pass
# -----------------------------------------
def run():
return solve(10000)
if __name__ == '__main__':
unittest.main()
#
# if __name__ == '__main__':
# t = timeit.Timer("run()", "from __main__ import run")
# count = 1
# print str(t.timeit(count)) + " seconds for " + str(count) + " runs"
#
#
|
arturh85/projecteuler
|
python/src/problem021.py
|
Python
|
mit
| 1,841
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#Load modules
try:
import imp
import sys
import os
import time
import prefcheck
import gui
#import update
import json
except ImportError:
raise Exception("Error: Faild to import critical modules.")
sys.exit(1)
def sysCheck():
#start writing to sysCheck.log
unlog = sys.stdout
sysCheckLog = open("sysCheck.log", 'w')
sys.stdout = sysCheckLog
print("\nNote : Doing system check...")
#Print OS and time
print("Note : Timestamp %s, %s" %(time.strftime("%d/%m/%Y"), time.strftime("%H:%M:%S")))
print("Note : Running on platform '%s'" %(sys.platform))
#Test Python Version
pyVersion = sys.hexversion
    if 0x03000000 <= pyVersion < 0x04000000:
print("Note : Running Python version '%s' " %(pyVersion))
else:
print("Error: Running Python version '%s'. Python 3.x is required " %(pyVersion))
sys.exit(1)
print("Note : Rolling updates enabled, '%s'" %('no'))
print("Note : noodle-pipe release version '%s'" %('0.0.0'))
#Check core dependencies
jsonFile = open('../modules/data/dependencies.json', 'r')
jsonData = json.load(jsonFile)
jsonFile.close()
for keys, values in jsonData.items():
for items in jsonData[keys]:
try:
imp.find_module(items)
except:
print("Error: Found core module '%s', no" %(items))
raise Exception("Found core module '%s', no" %(items))
sys.exit(1)
else:
print("Note : Found core module '%s', yes" %(items))
#Check plugin dependencies
dirList = os.listdir('../plugins/')
fileList = []
plugModules = []
for items in dirList:
print("Note : Found plugin '%s' in '%s'" %(items, "../plugins/%s/" %(items)))
if os.path.isfile("../plugins/%s/data" %(items)):
jsonFile = open("../plugins/%s/data" %(items), 'r')
jsonData = json.load(jsonFile)
            jsonFile.close()
            for keys, values in jsonData.items():
                # use 'mod' here so the outer 'items' (the plugin name) is not
                # clobbered before it is reused in the else branch below
                for mod in jsonData[keys]:
                    try:
                        imp.find_module(mod)
                    except:
                        print("Error: Found plugin module '%s', no" %(mod))
                    else:
                        print("Note : Found plugin module '%s', yes" %(mod))
else:
print("Warn : Found dependencies.json for plugin '%s', no" %(items))
#raise Exception("Found dependencies.json for plugin '%s', no" %(items))
#Stop writing to sysCheck.log
sys.stdout = unlog
sysCheckLog.close()
print("Note : Read sysCheck.log for details")
#Queries all noodle-pipe modules for a list of dependency modules
def modGather():
if os.path.isdir("../modules/data/"):
print("Note : Found path '../modules/data/', yes")
else:
print("Warn : Found path '../modules/data/', no")
print("Warn : Can't determine core module dependencies")
#get list of noodle-pipe core modules
dirList = os.listdir('../modules/')
coreModules = []
for items in dirList:
if items[-2:] == 'py':
coreModules.append(items[:-3])
#get list of plugin modules
dirList = os.listdir('../plugins/')
fileList = []
plugModules = []
for items in dirList:
fileList.append(os.listdir("../plugins/%s/data" %(items)))
#if items[-2:] == 'py':
# modules.append(items[:-3])
print(fileList)
#Determines which modules are available and which aren't
def modCheck(modules=None):
    if (modules is None) or (len(modules) < 1):
raise Exception("Expected type 'List' with length > 0")
else:
moduleDic = {}
for mods in modules:
try:
imp.find_module(mods)
except:
moduleDic[str(mods)] = False
else:
moduleDic[str(mods)] = True
return(moduleDic)
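#Example: modCheck(['os', 'not_a_module']) returns {'os': True, 'not_a_module': False}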
#Prints the results of modCheck() to the terminal
def modPrint(modules=None):
    if (modules is None) or (len(modules) < 1):
raise Exception("Expected type 'List' with length > 0")
else:
for keys, values in modules.items():
if values is False:
print("Error: Found module '%s', no" %(keys))
else:
print("Note : Found module '%s', yes" %(keys))
if __name__ == '__main__':
print("Error: Improper usage of 'syscheck', See documentaion for proper usage")
raise Exception("Improper usage of 'syscheck', See documentaion for proper usage")
|
TheEnvironmentGuy/noodle-pipe
|
Source/Qt5/modules/syscheck.py
|
Python
|
mit
| 4,017
|
#!/usr/bin/env python
"""
Copyright 2015-2020 Knights Lab, Regents of the University of Minnesota.
This software is released under the GNU Affero General Public License (AGPL) v3.0 License.
"""
import click
import os
from ninja_utils.utils import verify_make_dir
from ninja_utils.parsers import FASTA
from dojo.database import RefSeqDatabase
from dojo.taxonomy import NCBITree
from dojo.annotaters import GIAnnotater, RefSeqAnnotater, NTAnnotater
from shogun.wrappers import bowtie2_build
@click.command()
@click.option('-i', '--input', type=click.Path(), default='-', help='The input FASTA file for annotating with NCBI TID (default=stdin)')
@click.option('-o', '--output', type=click.Path(), default=os.path.join(os.getcwd(), 'annotated'), help='The directory to output the formatted DB and BT2 db (default=annotated)')
@click.option('-a', '--annotater', type=click.Choice(['gi', 'refseq', 'nt']), default='refseq', help='The annotater to use.',
show_default=True)
@click.option('-x', '--extract_id', default='ref|,|',
help='Characters that sandwich the RefSeq Accession Version in the reference FASTA', show_default=True)
@click.option('--prefixes', default='*', help="Supply a comma-separated list where the options are choices"
" in ('AC', 'NC', 'NG', 'NM', 'NT', 'NW', 'NZ') e.g. NC,AC default=all")
@click.option('-d', '--depth', default=7, help="The depth to annotate the map")
@click.option('-f', '--depth-force', default=True, help="Force the depth criterion if missing annotation")
def shogun_bt2_db(input, output, annotater, extract_id, prefixes, depth, depth_force):
verify_make_dir(output)
# Verify the FASTA is annotated
if input == '-':
output_fn = 'stdin'
else:
output_fn = '.'.join(str(os.path.basename(input)).split('.')[:-1])
outf_fasta = os.path.join(output, output_fn + '.annotated.fna')
outf_map = os.path.join(output, output_fn + '.annotated.map')
if not os.path.isfile(outf_fasta) or not os.path.isfile(outf_map):
tree = NCBITree()
db = RefSeqDatabase()
if annotater == 'refseq':
annotater_class = RefSeqAnnotater(extract_id, prefixes, db, tree, depth=depth, depth_force=depth_force)
elif annotater == 'nt':
annotater_class = NTAnnotater(extract_id, prefixes, db, tree, depth=depth, depth_force=depth_force)
else:
annotater_class = GIAnnotater(extract_id, db, tree, depth=depth, depth_force=depth_force)
with open(outf_fasta, 'w') as output_fna:
with open(outf_map, 'w') as output_map:
with open(input) as inf:
inf_fasta = FASTA(inf)
for lines_fna, lines_map in annotater_class.annotate(inf_fasta.read()):
output_fna.write(lines_fna)
output_map.write(lines_map)
else:
print("Found the output files \"%s\" and \"%s\". Skipping the annotation phase for this file." % (
outf_fasta, outf_map))
# Build the output BT2 database
verify_make_dir(os.path.join(output, 'bt2'))
print(bowtie2_build(outf_fasta, os.path.join(output, 'bt2', output_fn)))
if __name__ == '__main__':
shogun_bt2_db()
|
knights-lab/NINJA-SHOGUN
|
shogun/scripts/old/shogun_bt2_db.py
|
Python
|
mit
| 3,276
|
#!/usr/bin/env python
from ZSI import dispatch
from Test_server import EchoResponse
from Test_server import AddResponse
def echo(message):
response = EchoResponse()
response._Message = message
return response
def add( operators ):
response = AddResponse()
    print(operators)
response._Result = 0
for o in operators:
op = operators[o]
response._Result += op
return response
if __name__ == '__main__':
dispatch.AsServer(port=8080)
|
rd2b/tools
|
python-soaptest/src/server.py
|
Python
|
gpl-3.0
| 482
|
import pygraphviz as pgv
from lxml import etree
import textwrap
from flask import Flask
import json
import os
import copy
class SyllabusGraph(pgv.AGraph):
def __init__(self, style_path, is_embedded=False):
super(SyllabusGraph,self).__init__(overlap='false', outputorder='edgesfirst')
self.is_embedded = is_embedded
with open(style_path) as f:
self.style = json.loads(f.read())
for key in self.style:
if 'inherit' in self.style[key]:
parent = self.style[key]['inherit']
overwrites = self.style[key]
overwrites.pop('inherit', None)
self.style[key] = copy.deepcopy(self.style[parent])
self.style[key].update(overwrites)
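        # Illustrative style file contents (hypothetical): "central_unit"
        # inherits everything from "unit_1" and overrides only the fill colour.
        #
        #   {
        #     "unit_1":       {"shape": "circle", "style": "filled", "fillcolor": "#ddeeff"},
        #     "central_unit": {"inherit": "unit_1", "fillcolor": "#ffcc00"}
        #   }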
def add_unit_node(self, unit, is_central=False):
wrapped_name = textwrap.fill(unit.name, width = 15)
style = None
if is_central:
style = self.style['central_unit']
else:
style = self.style['unit_' + str(unit.get_year())]
node_name = "unit_{}".format(unit.id)
self.add_node(node_name,
id=node_name,
label=wrapped_name,
URL='#/graph/unit/{}'.format(unit.code),
**style)
return node_name
def add_category_node(self, category, weight):
label = category.name.split(":",1)[1]
label = '\n'.join(textwrap.wrap("%s" % label, width = 15))
node_name = "category_{}".format(category.id)
self.add_node(node_name,
id=node_name,
label=label,
width=1.7+((weight-1)*0.5),
fontsize=14+(weight-1),
URL='#/graph/category/{}'.format(category.id),
**self.style['category'])
return node_name
def add_topic_node(self, topic, is_central=False):
label = None
if self.is_embedded:
label = topic.name
else:
label = (u'<<table border="0" cellpadding="5" cellspacing="0" cellborder="0">' +
u'<tr><td href="#/graph/topic/{0}" title="Topic page" valign="middle">{1}</td>' +
u'<td valign="middle" href="//en.wikipedia.org/wiki/{1}" title="Wikipedia article" target="_blank_"><font face="Glyphicons Halflings" point-size="12" color="#666666">\ue164</font></td></tr>' +
u'</table>>')
label = label.format(
topic.id,
topic.name)
style = self.style['central_topic'] if is_central else self.style['topic']
node_name = self.topic_node_name(topic)
self.add_node(node_name,
id=node_name,
label=label,
**style)
return node_name
def add_edge(self, source, target):
super(SyllabusGraph, self).add_edge(source, target, **self.style['edge'])
def add_category_edge(self, source, target):
super(SyllabusGraph, self).add_edge(source, target, **self.style['category_edge'])
def render_svg(self):
self.layout(prog='neato')
svg = self.draw(format='svg').decode('utf-8')
svgparser = etree.XMLParser(encoding='utf-8')
svgobj = etree.fromstring(svg.encode('utf-8'), parser=svgparser)
svgobj.attrib['width'] = "100%"
svgobj.attrib['height'] = "100%"
for n in svgobj.xpath('//n:text', namespaces={'n': "http://www.w3.org/2000/svg"}):
n.attrib['text-rendering'] = 'geometricPrecision'
return etree.tostring(svgobj, pretty_print=True)
@staticmethod
def topic_node_name(topic):
return 'topic_{}'.format(topic.id)
|
UoMCS/syllabus-visualisation
|
server/graph.py
|
Python
|
mit
| 3,655
|
#!/usr/bin/python
#
# release_notes.py - "I can't believe it's not a web browser."
#
# David Cantrell <dcantrell@redhat.com>
#
# Copyright 2006 Red Hat, Inc.
#
# This software may be freely redistributed under the terms of the GNU
# library public license.
#
# You should have received a copy of the GNU Library Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
#
import sys
import os
import signal
import gtk
import gtkhtml2
import urllib
import urlparse
import gui
from rhpl.translate import _, N_
class ReleaseNotesViewer:
def __init__(self, anaconda):
self.currentURI = None
self.htmlheader = "<html><head><meta http-equiv=\"Content-Type\" content=\"text/html; charset=utf-8\"></head><body bgcolor=\"white\"><pre>"
self.htmlfooter = "</pre></body></html>"
self.doc = gtkhtml2.Document()
self.vue = gtkhtml2.View()
self.opener = urllib.FancyURLopener()
# FIXME: these do not work, disabling for FC6 --dcantrell
#self.doc.connect('request_url', self.requestURLCallBack)
#self.doc.connect('link_clicked', self.linkClickedCallBack)
#self.vue.connect('request_object', self.requestObjectCallBack)
self.topDir = None
self.width = None
self.height = None
self.is_showing = False
self.anaconda = anaconda
self.load()
self.resize()
self.setupWindow()
def getReleaseNotes(self):
langs = self.anaconda.id.instLanguage.getCurrentLangSearchList() + [ "" ]
suffixList = []
for lang in langs:
if lang:
suffixList.append("-%s.html" % (lang,))
suffixList.append(".%s" % (lang,))
for suffix in suffixList:
fn = "RELEASE-NOTES%s" % (suffix,)
try:
tmpfile = os.path.abspath(self.anaconda.dispatch.method.getFilename(fn, destdir="/tmp", retry=0))
if tmpfile is None:
continue
# Just because we got a filename back doesn't
# mean it's a valid file. Check that it's not
# zero length too.
st = os.stat(tmpfile)
if st.st_size == 0L:
os.remove(tmpfile)
continue
self.topDir = os.path.dirname(tmpfile)
return tmpfile
except:
continue
return None
def resize(self, w=None, h=None):
sw = gtk.gdk.screen_width()
(step, args) = self.anaconda.dispatch.currentStep()
if w is None:
if sw >= 800:
self.width = 800
else:
self.width = 640
else:
self.width = int(w)
# if we are at the installation progress bar step, make the
# release notes window smaller so the progress bar is still
# visible...otherwise, consume the entire screen
if h is None:
if sw >= 800:
if step == "installpackages":
self.height = 445
else:
self.height = 600
else:
if step == "installpackages":
self.height = 300
else:
self.height = 480
else:
self.height = int(h)
# FIXME: replace with logger from anaconda_log (fix exec first)
def log(self, string):
print string
def load(self, uri=None):
def loadWrapper(baloney):
self.doc.open_stream('text/html')
self.doc.write_stream(self.htmlheader)
self.doc.write_stream(baloney)
self.doc.write_stream(self.htmlfooter)
if uri is None:
uri = self.getReleaseNotes()
if uri is not None:
if os.access(uri, os.R_OK):
try:
f = self.openURI(uri)
except OSError:
self.log("Failed to open %s" % (uri,))
return
if f is not None:
self.doc.clear()
headers = f.info()
mime = headers.getheader('Content-type')
if mime:
self.doc.open_stream(mime)
self.doc.write_stream(f.read())
else:
loadWrapper(f.read())
self.doc.close_stream()
f.close()
self.currentURI = self.resolveURI(uri)
else:
loadWrapper(_("Release notes are missing.\n"))
self.currentURI = None
else:
loadWrapper(_("Release notes are missing.\n"))
self.currentURI = None
def isShowing(self):
return self.is_showing
def hide(self):
if self.textWin is not None:
self.textWin.hide_all()
self.is_showing = False
def setupWindow(self):
self.vue.set_document(self.doc)
self.textWin = gtk.Window()
self.textWin.connect("delete-event", self.closedCallBack)
mainbox = gtk.VBox(False, 6)
self.textWin.add(mainbox)
table = gtk.Table(3, 3, False)
mainbox.pack_start(table)
mainbox.pack_start(gtk.HSeparator(), False, False)
bb = gtk.HButtonBox()
bb.set_property("layout-style", gtk.BUTTONBOX_END)
b = gtk.Button(stock="gtk-close")
b.connect("clicked", self.closedCallBack)
bb.pack_start(b)
mainbox.pack_start(bb, False, False)
vbox1 = gtk.VBox()
vbox1.set_border_width(10)
frame = gtk.Frame("")
frame.add(vbox1)
frame.set_label_align(0.5, 0.5)
frame.set_shadow_type(gtk.SHADOW_NONE)
self.textWin.set_position(gtk.WIN_POS_NONE)
self.textWin.set_gravity(gtk.gdk.GRAVITY_NORTH_WEST)
if self.vue is not None:
sw = gtk.ScrolledWindow()
sw.set_policy(gtk.POLICY_AUTOMATIC,gtk.POLICY_AUTOMATIC)
sw.set_shadow_type(gtk.SHADOW_IN)
sw.add(self.vue)
vbox1.pack_start(sw)
a = gtk.Alignment(0, 0, 1.0, 1.0)
a.add(frame)
self.textWin.set_default_size(self.width, self.height)
self.textWin.set_size_request(self.width, self.height)
# we want the release notes dialog to be the same
# size as the main installer window so it covers it
# up completely. this isn't always the same size
# as the root window, so figure out our northwest
# origin point and then move the window
if gtk.gdk.screen_width() == self.width:
self.textWin.move(0, 0)
else:
# the width will always be fixed, but our
# height changes depending on the installation
# stage, so do the origin point calculations
# using what would be the full height
if self.width == 800:
fullh = 600
elif self.width == 640:
fullh = 480
left = (gtk.gdk.screen_width() - self.width) / 2
top = (gtk.gdk.screen_height() - fullh) / 2
self.textWin.move(left, top)
table.attach(a, 1, 2, 1, 2, gtk.FILL | gtk.EXPAND, gtk.FILL | gtk.EXPAND, 5, 5)
self.textWin.set_border_width(0)
gui.addFrame(self.textWin, _("Release Notes"))
else:
self.textWin.set_position(gtk.WIN_POS_CENTER)
label = gtk.Label(_("Unable to load file!"))
table.attach(label, 1, 2, 1, 2, gtk.FILL | gtk.EXPAND, gtk.FILL | gtk.EXPAND, 5, 5)
self.textWin.set_border_width(0)
gui.addFrame(self.textWin)
def view(self):
self.textWin.show_all()
# set cursor to normal (assuming that anaconda set it to busy
# when it exec'd this viewer app to give progress indicator
# to user).
root = gtk.gdk.get_default_root_window()
cursor = gtk.gdk.Cursor(gtk.gdk.LEFT_PTR)
root.set_cursor(cursor)
self.is_showing = True
def resolveURI(self, link):
parts = urlparse.urlparse(link)
if parts[0] or parts[1]:
return link
else:
# FIXME: does not work right now
#return urlparse.urljoin(self.currentURI, link)
return link
def openURI(self, link):
try:
ret = self.opener.open(self.resolveURI(link))
except IOError:
ret = None
return ret
def closedCallBack(self, *args):
self.textWin.hide_all()
self.is_showing = False
def linkClickedCallBack(self, document, link):
if link[0] == '#':
self.log("jump to anchor: %s" % (link,))
self.vue.jump_to_anchor(link)
else:
self.load(link)
def requestURLCallBack(self, document, url, stream):
try:
f = self.openURI(url)
stream.write(f.read())
except:
# we'll try local from self.topDir
url = os.path.abspath(self.topDir + '/' + url)
try:
f = self.openURI(url)
stream.write(f.read())
except:
self.log("requested url not found: %s" % (url,))
def requestObjectCallBack(self, *args):
self.log("request objects call back: %s" % (args))
|
sergey-senozhatsky/anaconda-11-vlan-support
|
iw/release_notes.py
|
Python
|
gpl-2.0
| 7,725
|
from django.conf.urls import patterns, include, url
from rms.views import *
from django.contrib import admin
from dajaxice.core import dajaxice_autodiscover, dajaxice_config
from django.conf import settings
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
dajaxice_autodiscover()
admin.autodiscover()
# Uncomment the next two lines to enable the admin:
# from django.contrib import admin
# admin.autodiscover()
# These urls are mapping to different views in views.py file.
urlpatterns = patterns('',
# Home page of RMS
(r'^$', main_page),
# Login / logout.
(r'^login/$', 'django.contrib.auth.views.login'),
(r'^logout/$', logout_page),
    # Serve static content. This line is important for serving the static files, i.e. the CSS and JavaScript files.
(r'^static/(?P<path>.*)$', 'django.views.static.serve',
{'document_root': 'static'}),
# User profile page
(r'^profile/$', profile),
#Booking page
(r'^booking/$', booking),
(r'^changepass/$', 'django.contrib.auth.views.password_change'),
(r'^done/$', 'django.contrib.auth.views.password_change_done'),
#Guest user page
(r'^guest/$', guest),
# User homepage.
(r'^dashboard/', include('dashboard.urls')),
# New user registration page
(r'^registration/$',register),
# FAQs page
(r'^help/$',help),
    # Credits page
(r'^credits/$',credits),
#This mapping is important for incorporating Django's Admin module in our project. If removed Admin module won't work.
(r'^admin/', include(admin.site.urls)),
# This line imports Dajaxice settings in our project, Important for AJAX functionality in our project. If removed AJAX functionality won't work.
#oldstyle
#(r'^%s/' % settings.DAJAXICE_MEDIA_PREFIX, include('dajaxice.urls')),
#newstyle
url(dajaxice_config.dajaxice_url, include('dajaxice.urls')),
)
urlpatterns += staticfiles_urlpatterns()
|
cseanitc/RMS
|
rms/rms/urls.py
|
Python
|
gpl-3.0
| 2,128
|
import pathlib
import itertools
import shutil
import modelx as mx
from modelx.tests.testdata.testpkg import testmod
import pytest
from importlib.machinery import SourceFileLoader
from importlib.util import spec_from_loader, module_from_spec
SAMPLE_MODULE = pathlib.Path(testmod.__file__)
params = list(itertools.product(
["model", "space"],
["write", "zip", "backup"],
[testmod, SAMPLE_MODULE, str(SAMPLE_MODULE)]
))
@pytest.mark.parametrize("parent, save_meth, module", params)
def test_new_module(tmp_path, parent, save_meth, module):
if parent == "model":
p = mx.new_model(name="Parent")
else:
p = mx.new_model().new_space(name="Parent")
p.new_module(name="Foo", path="Parent/Foo", module=module)
p.Bar = p.Foo
assert p.Foo.modbar(2) == 4
getattr(p.model, save_meth)(tmp_path / "model")
p.model.close()
if save_meth == "backup":
m2 = mx.restore_model(tmp_path / "model")
else:
m2 = mx.read_model(tmp_path / "model")
p2 = m2 if parent == "model" else m2.spaces["Parent"]
assert p2.Foo.modbar(2) == 4
assert p2.Bar is p2.Foo
m2._impl.system._check_sanity(check_members=False)
m2._impl._check_sanity()
# Check saving again
# https://github.com/fumitoh/modelx/issues/45
getattr(p2.model, save_meth)(tmp_path / "model")
m2.close()
if save_meth == "backup":
m3 = mx.restore_model(tmp_path / "model")
else:
m3 = mx.read_model(tmp_path / "model")
m3._impl.system._check_sanity(check_members=False)
m3._impl._check_sanity()
p3 = m3 if parent == "model" else m3.spaces["Parent"]
assert p3.Foo.modbar(2) == 4
assert p3.Bar is p3.Foo
m3.close()
def load_module(path_):
loader = SourceFileLoader("<unnamed module>", path=str(path_))
spec = spec_from_loader(loader.name, loader)
mod = module_from_spec(spec)
loader.exec_module(mod)
return mod
params_update = list(itertools.product(
["model", "space"],
["write", "zip"],
["module", "path-like", "str", None],
))
@pytest.mark.parametrize("parent, save_meth, replace", params_update)
def test_update_module(
tmp_path, parent, save_meth, replace):
if parent == "model":
p = mx.new_model(name="Parent")
else:
p = mx.new_model().new_space(name="Parent")
module1 = tmp_path / SAMPLE_MODULE.name
module2 = tmp_path / "testmod_updated.py"
# Copy sample modules in tmp_path
shutil.copyfile(SAMPLE_MODULE, module1)
shutil.copyfile(SAMPLE_MODULE.parent / "testmod_updated.py", module2)
old_module = p.new_module(name="Foo", path="Parent/Foo", module=module1)
p.Bar = p.Foo
def assert_original(m_or_s):
assert m_or_s.Foo.modfibo(10) == 55
assert m_or_s.Foo.modbar(2) == 4
assert m_or_s.Foo is m_or_s.Bar
assert_original(p)
# Set new_module parameter
if replace == "module":
new_module = load_module(module2)
elif replace == "path-like":
new_module = module2
elif replace == "str":
new_module = str(module2)
else:
new_module = None
shutil.copyfile(module2, module1)
p.update_module(old_module, new_module=new_module)
def assert_updated(m_or_s):
assert m_or_s.Foo.modfibo(10) == 144
assert m_or_s.Foo.modbar(2) == 6
assert m_or_s.Foo is m_or_s.Bar
assert_updated(p)
getattr(p.model, save_meth)(tmp_path / "model")
p.model.close()
m2 = mx.read_model(tmp_path / "model")
p2 = m2 if parent == "model" else m2.spaces["Parent"]
assert_updated(p2)
m2._impl.system._check_sanity(check_members=False)
m2._impl._check_sanity()
# Check saving again
# https://github.com/fumitoh/modelx/issues/45
getattr(p2.model, save_meth)(tmp_path / "model")
m2.close()
m3 = mx.read_model(tmp_path / "model")
m3._impl.system._check_sanity(check_members=False)
m3._impl._check_sanity()
p3 = m3 if parent == "model" else m3.spaces["Parent"]
assert_updated(p3)
m3.close()
|
fumitoh/modelx
|
modelx/tests/io/test_moduleio.py
|
Python
|
gpl-3.0
| 4,061
|
from django.contrib import admin
from django.core.urlresolvers import reverse
from django.shortcuts import redirect
from django.utils.translation import ugettext_lazy as _
from reversion.models import Revision
from sapl.base.models import ProblemaMigracao
from sapl.utils import register_all_models_in_admin
register_all_models_in_admin(__name__)
admin.site.unregister(ProblemaMigracao)
admin.site.site_title = 'Administração - SAPL'
admin.site.site_header = 'Administração - SAPL'
@admin.register(ProblemaMigracao)
class ProblemaMigracaoAdmin(admin.ModelAdmin):
list_display = ["content_type", "object_id", "nome_campo", "problema",
"descricao", "get_url"]
def get_url(self, obj):
info = (obj.content_object._meta.app_label,
obj.content_object._meta.model_name)
endereco = reverse('admin:%s_%s_change' % info,
args=(obj.content_object.pk,))
return "<a href='%s'>%s</a>" % (endereco, endereco)
get_url.short_description = "Endereço"
get_url.allow_tags = True
class RevisionAdmin(admin.ModelAdmin):
list_display = ('user', 'comment', 'date_created')
search_fields = ('=user__username', '=user__email')
date_hierarchy = ('date_created')
def change_view(self, request, obj=None):
self.message_user(request, _('You cannot change history.'))
return redirect('admin:reversion_revision_changelist')
admin.site.register(Revision, RevisionAdmin)
|
LeandroRoberto/sapl
|
sapl/base/admin.py
|
Python
|
gpl-3.0
| 1,490
|
# -*- coding: utf-8 -*-
from collections import defaultdict
import itertools
import math
import networkx as nx
import re
from typing import Any, DefaultDict, Dict, List, Union
from django.db import connection
from django.http import HttpRequest, JsonResponse
from django.shortcuts import get_object_or_404
from rest_framework.decorators import api_view
from catmaid import state
from catmaid.models import UserRole, Treenode, ClassInstance, \
TreenodeConnector, Location, SamplerInterval
from catmaid.control.authentication import requires_user_role, \
can_edit_class_instance_or_fail, can_edit_or_fail
from catmaid.control.common import (get_relation_to_id_map,
get_class_to_id_map, insert_into_log, _create_relation,
get_request_bool, get_request_list)
from catmaid.control.neuron import _delete_if_empty
from catmaid.control.node import _fetch_location, _fetch_locations
from catmaid.control.link import create_connector_link
from catmaid.util import Point3D, is_collinear
def can_edit_treenode_or_fail(user, project_id, treenode_id) -> bool:
""" Tests if a user has permissions to edit the neuron which the skeleton of
the treenode models. Will return true or throw an exception. Cannot return false. """
info = _treenode_info(project_id, treenode_id)
return can_edit_class_instance_or_fail(user, info['neuron_id'], 'neuron')
def can_edit_skeleton_or_fail(user, project_id, skeleton_id, model_of_relation_id) -> bool:
"""Test if a user has permission to edit a neuron modeled by a skeleton. Will return true
or throw an exception. Cannot return false."""
cursor = connection.cursor()
cursor.execute("""
SELECT
ci2.id as neuron_id
FROM
class_instance ci,
class_instance ci2,
class_instance_class_instance cici
WHERE ci.project_id = %s
AND ci.id = %s
AND ci.id = cici.class_instance_a
AND ci2.id = cici.class_instance_b
AND cici.relation_id = %s
""", (project_id, skeleton_id, model_of_relation_id))
if cursor.rowcount == 0:
raise ValueError('No neuron modeled by skeleton %s' % skeleton_id)
neuron_id = cursor.fetchone()[0]
return can_edit_class_instance_or_fail(user, neuron_id, 'neuron')
@requires_user_role(UserRole.Annotate)
def create_treenode(request:HttpRequest, project_id=None) -> JsonResponse:
"""
Add a new treenode to the database
----------------------------------
1. Add new treenode for a given skeleton id. Parent should not be empty.
return: new treenode id
If the parent's skeleton has a single node and belongs to the
'Isolated synaptic terminals' group, then reassign ownership
of the skeleton and the neuron to the user. The treenode remains
property of the original user who created it.
2. Add new treenode (root) and create a new skeleton (maybe for a given
neuron) return: new treenode id and skeleton id.
If a neuron id is given, use that one to create the skeleton as a model of
it.
"""
params = {}
float_values = {
'x': 0,
'y': 0,
'z': 0,
'radius': 0}
int_values = {
'confidence': 0,
'useneuron': -1,
'parent_id': -1}
string_values:Dict = {}
for p in float_values.keys():
params[p] = float(request.POST.get(p, float_values[p]))
for p in int_values.keys():
params[p] = int(request.POST.get(p, int_values[p]))
for p in string_values.keys():
params[p] = request.POST.get(p, string_values[p])
# Get optional initial links to connectors, expect each entry to be a list
# of connector ID, relation ID and confidence.
links = get_request_list(request.POST, 'links', [], map_fn=int)
# Make sure the back-end is in the expected state if the node should have a
# parent and will therefore become part of another skeleton.
parent_id = int(params['parent_id'])
has_parent = parent_id and parent_id != -1
if has_parent:
state.validate_state(parent_id, request.POST.get('state'),
parent_edittime=has_parent, lock=True)
new_treenode = _create_treenode(project_id, request.user, request.user,
params['x'], params['y'], params['z'], params['radius'],
params['confidence'], params['useneuron'], params['parent_id'],
neuron_name=request.POST.get('neuron_name', None))
# Create all initial links
if links:
created_links = create_connector_link(project_id, request.user.id,
new_treenode.treenode_id, new_treenode.skeleton_id, links)
else:
created_links = []
return JsonResponse({
'treenode_id': new_treenode.treenode_id,
'skeleton_id': new_treenode.skeleton_id,
'edition_time': new_treenode.edition_time,
'parent_edition_time': new_treenode.parent_edition_time,
'created_links': created_links
})
@requires_user_role(UserRole.Annotate)
def insert_treenode(request:HttpRequest, project_id=None) -> JsonResponse:
"""
    Create a new treenode between two existing nodes. Its creator and
    creation_date information will be set to those of the child node. Unless
    the user has edit permissions on the neuron, the new node must lie on the
    edge between the given child and parent node.
"""
# Use creation time, if part of parameter set
params:Dict[str, float] = {}
float_values = {
'x': 0,
'y': 0,
'z': 0,
'radius': 0
}
int_values = {
'confidence': 0,
'parent_id': -1,
'child_id': -1
}
for p in float_values.keys():
params[p] = float(request.POST.get(p, float_values[p]))
for p in int_values.keys():
params[p] = int(request.POST.get(p, int_values[p]))
    # If siblings should be taken over, all children of the parent node will
    # become children of the inserted node. This requires extra state
    # information: the child state for the parent.
takeover_child_ids = get_request_list(request.POST,
'takeover_child_ids', None, int)
# Get optional initial links to connectors, expect each entry to be a list
# of connector ID and relation ID.
try:
links = get_request_list(request.POST, 'links', [], int)
except Exception as e:
raise ValueError(f"Couldn't parse list parameter: {e}")
# Make sure the back-end is in the expected state if the node should have a
# parent and will therefore become part of another skeleton.
parent_id = params.get('parent_id')
child_id = params.get('child_id')
if parent_id not in (-1, None):
s = request.POST.get('state')
        # A regular edge insertion is assumed if a child ID is provided
partial_child_checks = [] if child_id in (-1, None) else [child_id]
if takeover_child_ids:
partial_child_checks.extend(takeover_child_ids)
        state.validate_state(parent_id, s, node=True,
                children=partial_child_checks or False, lock=True)
# Find child and parent of new treenode
child = Treenode.objects.get(pk=params['child_id'])
parent = Treenode.objects.get(pk=params['parent_id'])
# Make sure both nodes are actually child and parent
if not child.parent == parent:
raise ValueError('The provided nodes need to be child and parent')
# Make sure the requested location for the new node is on the edge between
# both existing nodes if the user has no edit permissions on the neuron.
try:
can_edit_treenode_or_fail(request.user, project_id, parent.id)
user, time = request.user, None
except:
child_loc = Point3D(child.location_x, child.location_y, child.location_z)
parent_loc = Point3D(parent.location_x, parent.location_y, parent.location_z)
new_node_loc = Point3D(params['x'], params['y'], params['z'])
if not is_collinear(child_loc, parent_loc, new_node_loc, True, 0.001):
raise ValueError('New node location has to be between child and parent')
# Use creator and creation time for neighboring node that was created last.
if child.creation_time < parent.creation_time:
user, time = parent.user, parent.creation_time
else:
user, time = child.user, child.creation_time
# Create new treenode
new_treenode = _create_treenode(project_id,
user, request.user, params['x'], params['y'], params['z'],
params['radius'], params['confidence'], -1, params['parent_id'], time)
    # Update parent of child to new treenode. Do this in raw SQL to also get
    # the updated edition time. Also update any taken-over children.
cursor = connection.cursor()
paramlist = [new_treenode.treenode_id, child.id]
if takeover_child_ids:
paramlist.extend(takeover_child_ids)
child_template = ",".join(("%s",) * (len(takeover_child_ids) + 1))
else:
child_template = "%s"
cursor.execute(f"""
UPDATE treenode SET parent_id = %s
WHERE id IN ({child_template})
RETURNING id, edition_time
""", paramlist)
result = cursor.fetchall()
if not result or (len(paramlist) - 1) != len(result):
raise ValueError("Couldn't update parent of inserted node's child: " + child.id)
child_edition_times = [[k,v] for k,v in result]
# Create all initial links
if links:
created_links = create_connector_link(project_id, request.user.id,
new_treenode.treenode_id, new_treenode.skeleton_id, links)
else:
created_links = []
return JsonResponse({
'treenode_id': new_treenode.treenode_id,
'skeleton_id': new_treenode.skeleton_id,
'edition_time': new_treenode.edition_time,
'parent_edition_time': new_treenode.parent_edition_time,
'child_edition_times': child_edition_times,
'created_links': created_links
})
class NewTreenode(object):
"""Represent a newly created treenode and all the information that is
returned to the client
"""
def __init__(self, treenode_id, edition_time, skeleton_id,
parent_edition_time):
self.treenode_id = treenode_id
self.edition_time = edition_time
self.skeleton_id = skeleton_id
self.parent_edition_time = parent_edition_time
def _create_treenode(project_id, creator, editor, x, y, z, radius, confidence,
neuron_id, parent_id, creation_time=None, neuron_name=None) -> NewTreenode:
relation_map = get_relation_to_id_map(project_id)
class_map = get_class_to_id_map(project_id)
def insert_new_treenode(parent_id=None, skeleton_id=None):
""" If the parent_id is not None and the skeleton_id of the parent does
not match with the skeleton.id, then the database will throw an error
given that the skeleton_id, being defined as foreign key in the
treenode table, will not meet the being-foreign requirement.
"""
new_treenode = Treenode()
new_treenode.user = creator
new_treenode.editor = editor
new_treenode.project_id = project_id
if creation_time:
new_treenode.creation_time = creation_time
new_treenode.location_x = float(x)
new_treenode.location_y = float(y)
new_treenode.location_z = float(z)
new_radius = int(radius if (radius and not math.isnan(radius)) else 0)
new_treenode.radius = new_radius
new_treenode.skeleton_id = skeleton_id
new_confidence = int(confidence if not math.isnan(confidence) and (confidence or confidence == 0) else 5)
new_treenode.confidence = new_confidence
if parent_id:
new_treenode.parent_id = parent_id
new_treenode.save()
return new_treenode
def relate_neuron_to_skeleton(neuron, skeleton):
return _create_relation(creator, project_id,
relation_map['model_of'], skeleton, neuron)
response_on_error = ''
try:
if -1 != int(parent_id): # A root node and parent node exist
# Select the parent treenode for update to prevent race condition
# updates to its skeleton ID while this node is being created.
cursor = connection.cursor()
cursor.execute('''
SELECT t.skeleton_id, t.edition_time FROM treenode t
WHERE t.id = %s FOR NO KEY UPDATE OF t
''', (parent_id,))
if cursor.rowcount != 1:
raise ValueError('Parent treenode %s does not exist' % parent_id)
parent_node = cursor.fetchone()
parent_skeleton_id = parent_node[0]
parent_edition_time = parent_node[1]
# Raise an Exception if the user doesn't have permission to edit
# the neuron the skeleton of the treenode is modeling.
can_edit_skeleton_or_fail(editor, project_id, parent_skeleton_id,
relation_map['model_of'])
response_on_error = 'Could not insert new treenode!'
new_treenode = insert_new_treenode(parent_id, parent_skeleton_id)
return NewTreenode(new_treenode.id, new_treenode.edition_time,
parent_skeleton_id, parent_edition_time)
else:
# No parent node: We must create a new root node, which needs a
# skeleton and a neuron to belong to.
response_on_error = 'Could not insert new treenode instance!'
new_skeleton = ClassInstance()
new_skeleton.user = creator
new_skeleton.project_id = project_id
new_skeleton.class_column_id = class_map['skeleton']
new_skeleton.name = 'skeleton'
new_skeleton.save()
new_skeleton.name = 'skeleton %d' % new_skeleton.id
new_skeleton.save()
if -1 != neuron_id:
# Check that the neuron to use exists
if 0 == ClassInstance.objects.filter(pk=neuron_id).count():
neuron_id = -1
if -1 != neuron_id:
# Raise an Exception if the user doesn't have permission to
# edit the existing neuron.
can_edit_class_instance_or_fail(editor, neuron_id, 'neuron')
# A neuron already exists, so we use it
response_on_error = 'Could not relate the neuron model to ' \
'the new skeleton!'
relate_neuron_to_skeleton(neuron_id, new_skeleton.id)
response_on_error = 'Could not insert new treenode!'
new_treenode = insert_new_treenode(None, new_skeleton.id)
return NewTreenode(new_treenode.id, new_treenode.edition_time,
new_skeleton.id, None)
else:
# A neuron does not exist, therefore we put the new skeleton
# into a new neuron.
response_on_error = 'Failed to insert new instance of a neuron.'
new_neuron = ClassInstance()
new_neuron.user = creator
new_neuron.project_id = project_id
new_neuron.class_column_id = class_map['neuron']
if neuron_name:
# Create a regular expression to find allowed patterns. The
# first group is the whole {nX} part, while the second group
# is X only.
counting_pattern = re.compile(r"(\{n(\d+)\})")
# Look for patterns, replace all {n} with {n1} to normalize.
neuron_name = neuron_name.replace("{n}", "{n1}")
if counting_pattern.search(neuron_name):
# Find starting values for each substitution.
counts = [int(m.groups()[1]) for m in counting_pattern.finditer(neuron_name)]
# Find existing matching neurons in database.
name_match = counting_pattern.sub(r"(\d+)", neuron_name)
name_pattern = re.compile(name_match)
matching_neurons = ClassInstance.objects.filter(
project_id=project_id,
class_column_id=class_map['neuron'],
name__regex=name_match).order_by('name')
# Increment substitution values based on existing neurons.
for n in matching_neurons:
for i, (count, g) in enumerate(zip(counts, name_pattern.search(n.name).groups())): # type: ignore
if count == int(g):
counts[i] = count + 1
# Substitute values.
count_ind = 0
m = counting_pattern.search(neuron_name)
while m:
neuron_name = m.string[:m.start()] + str(counts[count_ind]) + m.string[m.end():]
count_ind = count_ind + 1
m = counting_pattern.search(neuron_name)
new_neuron.name = neuron_name
else:
new_neuron.name = 'neuron'
new_neuron.save()
new_neuron.name = 'neuron %d' % new_neuron.id
new_neuron.save()
response_on_error = 'Could not relate the neuron model to ' \
'the new skeleton!'
relate_neuron_to_skeleton(new_neuron.id, new_skeleton.id)
response_on_error = 'Failed to insert instance of treenode.'
new_treenode = insert_new_treenode(None, new_skeleton.id)
response_on_error = 'Failed to write to logs.'
new_location = (new_treenode.location_x, new_treenode.location_y,
new_treenode.location_z)
insert_into_log(project_id, creator.id, 'create_neuron',
new_location, 'Create neuron %d and skeleton '
'%d' % (new_neuron.id, new_skeleton.id))
return NewTreenode(new_treenode.id, new_treenode.edition_time,
new_skeleton.id, None)
except Exception as e:
import traceback
raise ValueError("%s: %s %s" % (response_on_error, str(e),
str(traceback.format_exc())))
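# Illustrative sketch, not part of the original module: the "{nX}" counter
# substitution used for neuron names above, distilled into a standalone
# helper. It assumes no matching neurons exist yet, so every counter keeps
# its starting value.
def _example_substitute_name_counters(neuron_name):
    counting_pattern = re.compile(r"(\{n(\d+)\})")
    # Normalize the short form "{n}" to "{n1}", as the view code above does.
    neuron_name = neuron_name.replace("{n}", "{n1}")
    counts = [int(m.groups()[1]) for m in counting_pattern.finditer(neuron_name)]
    count_ind = 0
    m = counting_pattern.search(neuron_name)
    while m:
        neuron_name = m.string[:m.start()] + str(counts[count_ind]) + m.string[m.end():]
        count_ind += 1
        m = counting_pattern.search(neuron_name)
    return neuron_name
# _example_substitute_name_counters("neuron {n} of batch {n3}") -> "neuron 1 of batch 3"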
@requires_user_role(UserRole.Annotate)
def update_parent(request:HttpRequest, project_id=None, treenode_id=None) -> JsonResponse:
treenode_id = int(treenode_id)
parent_id = int(request.POST.get('parent_id', -1))
can_edit_treenode_or_fail(request.user, project_id, treenode_id)
# Make sure the back-end is in the expected state
state.validate_state(treenode_id, request.POST.get('state'),
neighborhood=True, lock=True)
child = get_object_or_404(Treenode, pk=treenode_id, project_id=project_id)
parent = get_object_or_404(Treenode, pk=parent_id, project_id=project_id)
if child.skeleton_id != parent.skeleton_id:
raise ValueError("Child node %s is in skeleton %s but parent node %s is in skeleton %s!", \
treenode_id, child.skeleton_id, parent_id, parent.skeleton_id)
child.parent_id = parent_id
child.save()
return JsonResponse({
'success': True,
'node_id': child.id,
'parent_id': child.parent_id,
'skeleton_id': child.skeleton_id
})
def update_node_radii(node_ids, radii, cursor=None) -> Dict:
"""Update radius of a list of nodes, returns old radii.
    Both lists/tuples and single values can be supplied.
"""
# Make sure we deal with lists
type_nodes = type(node_ids)
if type_nodes not in (list, tuple):
node_ids = (node_ids,)
    # If only a single radius value is available, use it for every input
# node ID.
type_radii = type(radii)
if type_radii not in (list, tuple):
radii = len(node_ids) * (radii,)
if len(node_ids) != len(radii):
raise ValueError("Number of treenode doesn't match number of radii")
    invalid_radii = [r for r in radii if math.isnan(r)]
    if invalid_radii:
        raise ValueError("Some radii were not numbers: " +
                ", ".join(map(str, invalid_radii)))
# Make sure we have a database cursor
cursor = cursor or connection.cursor()
# Create a list of the form [(node id, radius), ...]
node_radii = "(" + "),(".join(map(lambda pair: f"{pair[0]},{pair[1]}",
zip(node_ids, radii))) + ")"
cursor.execute(f'''
UPDATE treenode t SET radius = target.new_radius
FROM (SELECT x.id, x.radius AS old_radius, y.new_radius
FROM treenode x
INNER JOIN (VALUES {node_radii}) y(id, new_radius)
ON x.id=y.id FOR NO KEY UPDATE) target
WHERE t.id = target.id
RETURNING t.id, target.old_radius, target.new_radius,
t.edition_time, t.skeleton_id;
''')
updated_rows = cursor.fetchall()
if len(node_ids) != len(updated_rows):
missing_ids = frozenset(node_ids) - frozenset([r[0] for r in updated_rows])
        raise ValueError('Couldn\'t find treenodes ' +
                ','.join([str(ni) for ni in missing_ids]))
return {r[0]: {
'old': r[1],
'new': float(r[2]),
'edition_time': r[3],
'skeleton_id': r[4]
} for r in updated_rows}
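# Usage sketch, not part of the original module (node IDs 101 and 102 are
# hypothetical): a single radius value is broadcast to every node ID, and
# the returned mapping exposes the old and new radius per node.
def _example_update_radii_sketch(cursor=None):
    changes = update_node_radii([101, 102], 40.0, cursor)
    # e.g. changes[101] -> {'old': ..., 'new': 40.0, 'edition_time': ..., 'skeleton_id': ...}
    return {node_id: info['old'] for node_id, info in changes.items()}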
@requires_user_role(UserRole.Annotate)
def update_radii(request:HttpRequest, project_id=None) -> JsonResponse:
"""Update the radius of one or more nodes"""
treenode_ids = [int(v) for k,v in request.POST.items() \
if k.startswith('treenode_ids[')]
radii = [float(v) for k,v in request.POST.items() \
if k.startswith('treenode_radii[')]
# Make sure the back-end is in the expected state
cursor = connection.cursor()
state.validate_state(treenode_ids, request.POST.get('state'),
multinode=True, lock=True, cursor=cursor)
updated_nodes = update_node_radii(treenode_ids, radii, cursor)
return JsonResponse({
'success': True,
'updated_nodes': updated_nodes
})
@requires_user_role(UserRole.Annotate)
def update_radius(request:HttpRequest, project_id=None, treenode_id=None) -> JsonResponse:
treenode_id = int(treenode_id)
radius = float(request.POST.get('radius', -1))
if math.isnan(radius):
raise ValueError("Radius '%s' is not a number!" % request.POST.get('radius'))
option = int(request.POST.get('option', 0))
cursor = connection.cursor()
# Make sure the back-end is in the expected state
state.validate_state(treenode_id, request.POST.get('state'),
node=True, lock=True, cursor=cursor)
def create_update_response(updated_nodes, radius) -> JsonResponse:
return JsonResponse({
'success': True,
'updated_nodes': updated_nodes,
'new_radius': radius
})
if 0 == option:
# Update radius only for the passed in treenode and return the old
# radius.
old_radii = update_node_radii(treenode_id, radius, cursor)
return create_update_response(old_radii, radius)
cursor.execute('''
SELECT id, parent_id, radius
FROM treenode
WHERE skeleton_id = (SELECT t.skeleton_id FROM treenode t WHERE id = %s)
''' % treenode_id)
if 1 == option:
# Update radius from treenode_id to next branch or end node (included)
children:DefaultDict[Any, List] = defaultdict(list)
for row in cursor.fetchall():
children[row[1]].append(row[0])
include = [treenode_id]
c = children[treenode_id]
while 1 == len(c):
child = c[0]
include.append(child)
c = children[child]
old_radii = update_node_radii(include, radius, cursor)
return create_update_response(old_radii, radius)
if 2 == option:
# Update radius from treenode_id to prev branch node or root (excluded)
parents = {}
children = defaultdict(list)
for row in cursor.fetchall():
parents[row[0]] = row[1]
children[row[1]].append(row[0])
include = [treenode_id]
parent = parents[treenode_id]
while parent and parents[parent] and 1 == len(children[parent]):
include.append(parent)
parent = parents[parent]
old_radii = update_node_radii(include, radius, cursor)
return create_update_response(old_radii, radius)
if 3 == option:
# Update radius from treenode_id to prev node with radius (excluded)
parents = {}
for row in cursor.fetchall():
if row[2] < 0 or row[0] == treenode_id: # DB default radius is 0 but is initialized to -1 elsewhere
parents[row[0]] = row[1]
include = [treenode_id]
parent = parents[treenode_id]
while parent in parents:
include.append(parent)
parent = parents[parent]
old_radii = update_node_radii(include, radius, cursor)
return create_update_response(old_radii, radius)
if 4 == option:
# Update radius from treenode_id to root (included)
parents = {row[0]: row[1] for row in cursor.fetchall()}
include = [treenode_id]
parent = parents[treenode_id]
while parent:
include.append(parent)
parent = parents[parent]
old_radii = update_node_radii(include, radius, cursor)
return create_update_response(old_radii, radius)
if 5 == option:
# Update radius of all nodes (in a single query)
skeleton_id = Treenode.objects.get(pk=treenode_id).skeleton_id
include = list(Treenode.objects.filter(skeleton_id=skeleton_id) \
.values_list('id', flat=True))
old_radii = update_node_radii(include, radius, cursor)
return create_update_response(old_radii, radius)
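# Reference sketch, not part of the original module: the radius update
# strategies selected by the numeric 'option' POST parameter above.
_EXAMPLE_RADIUS_UPDATE_OPTIONS = {
    0: 'single node only',
    1: 'from node to next branch or end node (included)',
    2: 'from node to previous branch node or root (excluded)',
    3: 'from node to previous node with a defined radius (excluded)',
    4: 'from node to root (included)',
    5: 'all nodes of the skeleton',
}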
@requires_user_role(UserRole.Annotate)
def delete_treenode(request:HttpRequest, project_id=None) -> JsonResponse:
""" Deletes a treenode. If the skeleton has a single node, deletes the
skeleton and its neuron. Returns the parent_id, if any."""
treenode_id = int(request.POST.get('treenode_id', -1))
# Raise an exception if the user doesn't have permission to edit the
# treenode.
can_edit_or_fail(request.user, treenode_id, 'treenode')
# Raise an Exception if the user doesn't have permission to edit the neuron
# the skeleton of the treenode is modeling.
can_edit_treenode_or_fail(request.user, project_id, treenode_id)
# Make sure the back-end is in the expected state
state.validate_state(treenode_id, request.POST.get('state'), lock=True,
neighborhood=True)
treenode = Treenode.objects.get(pk=treenode_id)
parent_id = treenode.parent_id
# Get information about linked connectors
links = list(TreenodeConnector.objects.filter(project_id=project_id,
treenode_id=treenode_id).values_list('id', 'relation_id',
'connector_id', 'confidence'))
# Prevent deletion if node is referenced from sampler or sampler domain. The
# deletion would fail regardless, but this way we can provide a nicer error
# message.
cursor = connection.cursor()
cursor.execute("""
SELECT
EXISTS(
SELECT 1 FROM catmaid_samplerinterval
WHERE project_id = %(project_id)s AND
(start_node_id = %(treenode_id)s OR end_node_id = %(treenode_id)s)),
EXISTS(
SELECT 1 FROM catmaid_samplerdomain
WHERE project_id = %(project_id)s AND
(start_node_id = %(treenode_id)s)),
EXISTS(
SELECT 1 FROM catmaid_samplerdomainend
WHERE end_node_id = %(treenode_id)s)
""", {
'project_id': project_id,
'treenode_id': treenode_id,
})
sampler_refs = cursor.fetchone()
has_sampler_interval_refs = sampler_refs[0]
has_sampler_domain_refs = sampler_refs[1] or sampler_refs[2]
if has_sampler_interval_refs:
raise ValueError("Can't delete node, it is used in at least one sampler interval")
if has_sampler_domain_refs:
raise ValueError("Can't delete node, it is used in at least one sampler domain")
response_on_error = ''
deleted_neuron = False
cursor = connection.cursor()
try:
if not parent_id:
children:List = []
# This treenode is root.
response_on_error = 'Could not retrieve children for ' \
'treenode #%s' % treenode_id
n_children = Treenode.objects.filter(parent=treenode).count()
response_on_error = "Could not delete root node"
if n_children > 0:
# TODO yes you can, the new root is the first of the children,
# and other children become independent skeletons
raise ValueError("You can't delete the root node when it "
"has children.")
# Get the neuron before the skeleton is deleted. It can't be
# accessed otherwise anymore.
neuron = ClassInstance.objects.get(project_id=project_id,
cici_via_b__relation__relation_name='model_of',
cici_via_b__class_instance_a=treenode.skeleton)
# Remove the original skeleton. It is OK to remove it if it only had
# one node, even if the skeleton's user does not match or the user
# is not superuser. Delete the skeleton, which triggers deleting
# the ClassInstanceClassInstance relationship with neuron_id
response_on_error = 'Could not delete skeleton.'
# Extra check for errors, like having two root nodes
count = Treenode.objects.filter(skeleton_id=treenode.skeleton_id) \
.count()
if 1 == count:
# deletes as well treenodes that refer to the skeleton
ClassInstance.objects.filter(pk=treenode.skeleton_id) \
.delete()
else:
raise ValueError("Can't delete isolated node: erroneously, " \
"its skeleton contains more than one treenode! " \
"Check for multiple root nodes.")
# If the neuron modeled by the skeleton of the treenode is empty,
# delete it.
response_on_error = 'Could not delete neuron #%s' % neuron.id
deleted_neuron = _delete_if_empty(neuron.id)
if deleted_neuron:
# Insert log entry for neuron deletion
insert_into_log(project_id, request.user.id, 'remove_neuron',
(treenode.location_x, treenode.location_y, treenode.location_z),
'Deleted neuron %s and skeleton(s) %s.' % (neuron.id, treenode.skeleton_id))
else:
# Treenode is not root, it has a parent and perhaps children.
# Reconnect all the children to the parent.
response_on_error = 'Could not update parent id of children nodes'
cursor.execute("""
UPDATE treenode SET parent_id = %s
WHERE project_id = %s AND parent_id = %s
RETURNING id, edition_time
""", (treenode.parent_id, project_id, treenode.id))
# Children will be a list of two-element lists, just what we want to
# return as child info.
children = cursor.fetchall()
# Remove treenode. Set the current user name in a transaction local
# variable. This is done to communicate the current user to the trigger
# that updates the skeleton summary table.
response_on_error = 'Could not delete treenode.'
cursor.execute("SET LOCAL catmaid.user_id=%(user_id)s", {
'user_id': request.user.id,
})
Treenode.objects.filter(project_id=project_id, pk=treenode_id).delete()
return JsonResponse({
'x': treenode.location_x,
'y': treenode.location_y,
'z': treenode.location_z,
'parent_id': parent_id,
'children': children,
'links': links,
'radius': treenode.radius,
'confidence': treenode.confidence,
'skeleton_id': treenode.skeleton_id,
'deleted_neuron': deleted_neuron,
'success': "Removed treenode successfully."
})
except Exception as e:
raise ValueError(response_on_error + ': ' + str(e))
def _compact_detail_list(project_id, treenode_ids=None, label_ids=None,
label_names=None, skeleton_ids=None):
"""
Return a list with information on the passed in node IDs or on treenodes
    that match the optional label references. The result has the form:
[ID, parent ID, x, y, z, confidence, radius, skeleton_id, edition_time, user_id]
The returned edition time is an epoch number.
"""
if not any((treenode_ids, label_ids, label_names, skeleton_ids)):
raise ValueError("No treenode IDs, label IDs, label names or skeleton IDs provided")
extra_joins = []
extra_where = []
if treenode_ids:
extra_joins.append("""
JOIN UNNEST(%(treenode_ids)s::bigint[]) query(id)
ON t.id = query.id
""")
labeled_as = None
if label_ids or label_names:
relation_map = get_relation_to_id_map(project_id, ('labeled_as',))
labeled_as = relation_map['labeled_as']
if label_ids:
extra_joins.append("""
JOIN treenode_class_instance tci
ON tci.treenode_id = t.id
JOIN UNNEST(%(label_ids)s::bigint[]) label(id)
ON label.id = tci.class_instance_id
""")
extra_where.append("""
tci.relation_id = %(labeled_as)s
""")
if label_names:
extra_joins.append("""
JOIN treenode_class_instance tci
ON tci.treenode_id = t.id
JOIN class_instance ci
ON ci.id = tci.class_instance_id
JOIN UNNEST(%(label_names)s::text[]) label(name)
ON label.name = ci.name
""")
extra_where.append("""
tci.relation_id = %(labeled_as)s
""")
if skeleton_ids:
extra_joins.append("""
JOIN UNNEST(%(skeleton_ids)s::bigint[]) skeleton(id)
ON skeleton.id = t.skeleton_id
""")
cursor = connection.cursor()
cursor.execute("""
SELECT t.id, t.parent_id, t.location_x, t.location_y, t.location_z, t.confidence,
t.radius, t.skeleton_id,
EXTRACT(EPOCH FROM t.edition_time), t.user_id
FROM treenode t
{extra_joins}
WHERE t.project_id=%(project_id)s
{extra_where}
""".format(**{
'extra_joins': '\n'.join(extra_joins),
'extra_where': ('AND ' + ' AND\n'.join(extra_where)) if extra_where else '',
}), {
'project_id': project_id,
'treenode_ids': treenode_ids,
'labeled_as': labeled_as,
'label_ids': label_ids,
'label_names': label_names,
'skeleton_ids': skeleton_ids
})
rows = cursor.fetchall()
return rows
def _compact_detail(project_id, treenode_id):
"""
Return a list with information on the passed in node. It has the form:
[ID, parent ID, x, y, z, confidence, radius, skeleton_id, edition_time, user_id]
The returned edition time is an epoch number.
"""
cursor = connection.cursor()
cursor.execute("""
SELECT id, parent_id, location_x, location_y, location_z, confidence,
radius, skeleton_id, EXTRACT(EPOCH FROM edition_time), user_id
FROM treenode
WHERE id=%(treenode_id)s
AND project_id=%(project_id)s
""", {
'project_id': project_id,
'treenode_id': treenode_id
})
rows = cursor.fetchall()
if len(rows) == 0:
raise ValueError(f"Could not find treenode with ID {treenode_id}")
if len(rows) > 1:
raise ValueError(f"Found {len(rows)} treenodes with ID {treenode_id}, expected one")
return rows[0]
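# Readability sketch, not part of the original module: naming the positional
# fields of the compact detail row documented above.
def _example_compact_detail_fields(project_id, treenode_id):
    (node_id, parent_id, x, y, z, confidence, radius,
            skeleton_id, edition_epoch, user_id) = _compact_detail(project_id, treenode_id)
    return {'id': node_id, 'parent_id': parent_id, 'edition_epoch': edition_epoch}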
def _treenode_info(project_id, treenode_id):
c = connection.cursor()
# (use raw SQL since we are returning values from several different models)
c.execute("""
SELECT
treenode.skeleton_id,
ci.name as skeleton_name,
ci2.id as neuron_id,
ci2.name as neuron_name
FROM
treenode,
relation r,
class_instance ci,
class_instance ci2,
class_instance_class_instance cici
WHERE ci.project_id = %s
AND treenode.id = %s
AND treenode.skeleton_id = ci.id
AND ci.id = cici.class_instance_a
AND ci2.id = cici.class_instance_b
AND cici.relation_id = r.id
AND r.relation_name = 'model_of'
""", (project_id, treenode_id))
results = [
dict(zip([col[0] for col in c.description], row))
for row in c.fetchall()
]
if len(results) > 1:
raise ValueError('Found more than one skeleton and neuron for '
'treenode %s' % treenode_id)
elif len(results) == 0:
raise ValueError('No skeleton and neuron for treenode %s' % treenode_id)
return results[0]
@api_view(['GET'])
@requires_user_role([UserRole.Annotate, UserRole.Browse])
def treenode_info(request:HttpRequest, project_id=None, treenode_id=None) -> JsonResponse:
"""Retrieve skeleton and neuron information about this treenode.
---
type:
skeleton_id:
description: ID of the treenode's skeleton
type: integer
required: true
skeleton_name:
description: Name of the treenode's skeleton
type: string
required: true
neuron_id:
description: ID of the treenode's neuron
type: integer
required: true
neuron_name:
description: Name of the treenode's neuron
type: string
required: true
"""
info = _treenode_info(int(project_id), int(treenode_id))
return JsonResponse(info)
@api_view(['GET'])
@requires_user_role(UserRole.Browse)
def compact_detail(request:HttpRequest, project_id=None, treenode_id=None) -> JsonResponse:
"""
Retrieve node information in a compact form. A list of the following form
is returned:
[ID, parent ID, x, y, z, confidence, radius, skeleton_id, edition_time, user_id]
The returned edition time is an epoch number.
"""
info = _compact_detail(int(project_id), int(treenode_id))
return JsonResponse(info, safe=False)
@api_view(['POST'])
@requires_user_role(UserRole.Browse)
def compact_detail_list(request:HttpRequest, project_id=None) -> JsonResponse:
"""
Retrieve node information in a compact form. A list of elements of the
following form is returned:
[ID, parent ID, x, y, z, confidence, radius, skeleton_id, edition_time, user_id]
The returned edition time is an epoch number.
---
parameters:
- name: project_id
description: Project to work in
required: true
- name: treenode_ids
        description: A list of treenode IDs to return information on
required: false
- name: label_ids
description: |
A list of label IDs that must be linked to result treenodes. Alternative
to explicit treenode IDs and label names.
required: false
- name: label_names
description: |
A list of label names that must be linked to result treenodes.
Alternative to explicit treenode IDs and label IDs
required: false
- name: skeleton_ids
description: |
          A list of skeleton IDs that result treenodes have to be part of.
required: false
"""
treenode_ids = get_request_list(request.POST, 'treenode_ids', None, int)
label_ids = get_request_list(request.POST, 'label_ids', None, int)
label_names = get_request_list(request.POST, 'label_names')
skeleton_ids = get_request_list(request.POST, 'skeleton_ids', None, int)
if not any((treenode_ids, label_ids, label_names, skeleton_ids)):
raise ValueError("No treenode IDs, label IDs, label names or skeleton IDs provided")
info = _compact_detail_list(int(project_id), treenode_ids, label_ids,
label_names, skeleton_ids)
return JsonResponse(info, safe=False)
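# Client-side sketch, not part of the original module: the route is a
# hypothetical stand-in (check CATMAID's urls.py for the actual URL), and
# `session` is assumed to behave like a requests.Session with credentials
# already set up. List parameters are passed with indexed keys.
def _example_fetch_compact_details(session, base_url, project_id):
    url = '%s/%s/treenodes/compact-detail' % (base_url, project_id)
    response = session.post(url, data={'label_names[0]': 'soma'})
    # Each row: [id, parent_id, x, y, z, confidence, radius, skeleton_id,
    # edition_time, user_id]
    return response.json()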
@requires_user_role([UserRole.Annotate, UserRole.Browse])
def find_children(request:HttpRequest, project_id=None, treenode_id=None) -> JsonResponse:
try:
tnid = int(treenode_id)
cursor = connection.cursor()
cursor.execute('''
SELECT id, location_x, location_y, location_z
FROM treenode
WHERE parent_id = %s
''', (tnid,))
children = [[row] for row in cursor.fetchall()]
return JsonResponse(children, safe=False)
except Exception as e:
        raise ValueError('Could not obtain children for treenode: ' + str(e))
@api_view(['POST'])
@requires_user_role(UserRole.Annotate)
def update_confidence(request:HttpRequest, project_id=None, treenode_id=None) -> JsonResponse:
"""Update confidence of edge between a node to either its parent or its
connectors.
The connection between a node and its parent or the connectors it is linked
to can be rated with a confidence value in the range 1-5. If connector links
should be updated, one can limit the affected connections to a specific
connector. Returned is an object, mapping updated partners to their old
confidences.
---
parameters:
- name: new_confidence
description: New confidence, value in range 1-5
type: integer
required: true
- name: to_connector
description: Whether all linked connectors instead of parent should be updated
type: boolean
required: false
- name: partner_ids
description: Limit update to a set of connectors if to_connector is true
type: array
items: integer
required: false
- name: partner_confidences
description: Set different confidences to connectors in <partner_ids>
type: array
items: integer
required: false
type:
message:
type: string
required: true
updated_partners:
type: object
required: true
"""
tnid = int(treenode_id)
can_edit_treenode_or_fail(request.user, project_id, tnid)
cursor = connection.cursor()
state.validate_state(tnid, request.POST.get('state'),
node=True, lock=True, cursor=cursor)
to_connector = get_request_bool(request.POST, 'to_connector', False)
partner_ids = get_request_list(request.POST, 'partner_ids', None, int)
partner_confidences = get_request_list(request.POST, 'partner_confidences',
None, int)
new_confidence = int(request.POST.get('new_confidence', 0))
# If partner confidences are specified, make sure there are exactly as many
# as there are partners. Otherwise validate passed in confidence
if partner_ids and partner_confidences:
if len(partner_confidences) != len(partner_ids):
raise ValueError("There have to be as many partner confidences as"
"there are partner IDs")
else:
if new_confidence < 1 or new_confidence > 5:
raise ValueError('Confidence not in range 1-5 inclusive.')
if partner_ids:
# Prepare new confidences for connector query
partner_confidences = (new_confidence,) * len(partner_ids)
if to_connector:
if partner_ids:
partner_template = ",".join(("(%s,%s)",) * len(partner_ids))
partner_data = [p for v in zip(partner_ids, partner_confidences) for p in v]
cursor.execute(f'''
UPDATE treenode_connector tc
SET confidence = target.new_confidence
FROM (SELECT x.id, x.confidence AS old_confidence,
new_values.confidence AS new_confidence
FROM treenode_connector x
JOIN (VALUES {partner_template}) new_values(cid, confidence)
ON x.connector_id = new_values.cid
WHERE x.treenode_id = %s) target
WHERE tc.id = target.id
RETURNING tc.connector_id, tc.edition_time, target.old_confidence
''', partner_data + [tnid])
else:
cursor.execute('''
UPDATE treenode_connector tc
SET confidence = %s
FROM (SELECT x.id, x.confidence AS old_confidence
FROM treenode_connector x
WHERE treenode_id = %s) target
WHERE tc.id = target.id
RETURNING tc.connector_id, tc.edition_time, target.old_confidence
''', (new_confidence, tnid))
else:
cursor.execute('''
UPDATE treenode t
SET confidence = %s, editor_id = %s
FROM (SELECT x.id, x.confidence AS old_confidence
FROM treenode x
WHERE id = %s) target
WHERE t.id = target.id
RETURNING t.parent_id, t.edition_time, target.old_confidence
''', (new_confidence, request.user.id, tnid))
updated_partners = cursor.fetchall()
if len(updated_partners) > 0:
location = Location.objects.filter(id=tnid).values_list(
'location_x', 'location_y', 'location_z')[0]
insert_into_log(project_id, request.user.id, "change_confidence",
location, "Changed to %s" % new_confidence)
return JsonResponse({
'message': 'success',
'updated_partners': {
r[0]: {
'edition_time': r[1],
'old_confidence': r[2]
} for r in updated_partners
}
})
# Else, signal error
if to_connector:
raise ValueError('Failed to update confidence between treenode %s and '
'connector.' % tnid)
else:
raise ValueError('Failed to update confidence at treenode %s.' % tnid)
def _skeleton_as_graph(skeleton_id) -> nx.DiGraph:
# Fetch all nodes of the skeleton
cursor = connection.cursor()
cursor.execute('''
SELECT id, parent_id
FROM treenode
WHERE skeleton_id=%s''', [skeleton_id])
# Create a directed graph of the skeleton
graph = nx.DiGraph()
for row in cursor.fetchall():
# row[0]: id
# row[1]: parent_id
graph.add_node(row[0])
if row[1]:
# Create directional edge from parent to child
graph.add_edge(row[1], row[0])
return graph
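# Sketch, not part of the original module: with edges directed from parent
# to child, the root is the only node without incoming edges and the leaves
# are the nodes without outgoing edges.
def _example_skeleton_endpoints(skeleton_id):
    graph = _skeleton_as_graph(skeleton_id)
    roots = [n for n in graph.nodes() if graph.in_degree(n) == 0]
    leaves = [n for n in graph.nodes() if graph.out_degree(n) == 0]
    return roots, leaves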
def _find_first_interesting_node(sequence):
""" Find the first node that:
1. Has confidence lower than 5
2. Has a tag
    3. Has any connector (e.g. receives/makes synapse, marked as abutting, ...)
Otherwise return the last node.
"""
if not sequence:
raise ValueError('No nodes ahead!')
if 1 == len(sequence):
return sequence[0]
cursor = connection.cursor()
cursor.execute('''
SELECT t.id, t.confidence, tc.relation_id, tci.relation_id
FROM treenode t
LEFT OUTER JOIN treenode_connector tc ON (tc.treenode_id = t.id)
LEFT OUTER JOIN treenode_class_instance tci ON (tci.treenode_id = t.id)
WHERE t.id IN (%s)
''' % ",".join(map(str, sequence)))
nodes = {row[0]: row for row in cursor.fetchall()}
for node_id in sequence:
if node_id in nodes:
props = nodes[node_id]
# [1]: confidence
# [2]: a treenode_connector.relation_id, e.g. presynaptic_to or postsynaptic_to
# [3]: a treenode_class_instance.relation_id, e.g. labeled_as
# 2 and 3 may be None
if props[1] < 5 or props[2] or props[3]:
return node_id
else:
raise ValueError('Nodes of this skeleton changed while inspecting them.')
return sequence[-1]
@requires_user_role([UserRole.Annotate, UserRole.Browse])
def find_previous_branchnode_or_root(request:HttpRequest, project_id=None, treenode_id=None) -> JsonResponse:
try:
tnid = int(treenode_id)
alt = 1 == int(request.POST['alt'])
skid = Treenode.objects.get(pk=tnid).skeleton_id
graph = _skeleton_as_graph(skid)
# Travel upstream until finding a parent node with more than one child
# or reaching the root node
seq = [] # Does not include the starting node tnid
while True:
parents = graph.predecessors(tnid)
if parents: # list of parents is not empty
                tnid = parents[0] # Can only have one parent
seq.append(tnid)
if 1 != len(graph.successors(tnid)):
break # Found a branch node
else:
break # Found the root node
if seq and alt:
tnid = _find_first_interesting_node(seq)
return JsonResponse(_fetch_location(project_id, tnid), safe=False)
except Exception as e:
        raise ValueError('Could not obtain previous branch node or root: ' + str(e))
@requires_user_role([UserRole.Annotate, UserRole.Browse])
def find_next_branchnode_or_end(request:HttpRequest, project_id=None, treenode_id=None) -> JsonResponse:
try:
tnid = int(treenode_id)
skid = Treenode.objects.get(pk=tnid).skeleton_id
graph = _skeleton_as_graph(skid)
children = graph.successors(tnid)
branches = []
for child_node_id in children:
# Travel downstream until finding a child node with more than one
# child or reaching an end node
seq = [child_node_id] # Does not include the starting node tnid
branch_end = child_node_id
while True:
branch_children = graph.successors(branch_end)
if 1 == len(branch_children):
branch_end = branch_children[0]
seq.append(branch_end)
else:
break # Found an end node or a branch node
branches.append([child_node_id,
_find_first_interesting_node(seq),
branch_end])
# If more than one branch exists, sort based on downstream arbor size.
if len(children) > 1:
branches.sort(
key=lambda b: len(nx.algorithms.traversal.depth_first_search.dfs_successors(graph, b[0])),
reverse=True)
# Leaf nodes will have no branches
if len(children) > 0:
# Create a dict of node ID -> node location
node_ids_flat = list(itertools.chain.from_iterable(branches))
node_locations = {row[0]: row for row in _fetch_locations(project_id, node_ids_flat)}
branches = [[node_locations[node_id] for node_id in branch] for branch in branches]
return JsonResponse(branches, safe=False)
except Exception as e:
raise ValueError('Could not obtain next branch node or leaf: ' + str(e))
def _importing_user(project_id, treenode_id):
cursor = connection.cursor()
cursor.execute(f"""
SELECT t_origin_tx.user_id
FROM (
SELECT txid, edition_time
FROM treenode__with_history th
WHERE th.id = %(obj_id)s
ORDER BY edition_time ASC
LIMIT 1
) t_origin
JOIN LATERAL (
SELECT cti.user_id
FROM catmaid_transaction_info cti
WHERE cti.transaction_id = t_origin.txid
-- Transaction ID wraparound match protection. A transaction
-- ID is only unique together with a date.
AND cti.execution_time = t_origin.edition_time
AND label = 'skeletons.import'
LIMIT 1
) t_origin_tx
ON TRUE
""", {
'obj_id': treenode_id,
})
result = cursor.fetchone()
return result[0] if result else None
@api_view(['GET'])
@requires_user_role([UserRole.Annotate, UserRole.Browse])
def importing_user(request:HttpRequest, project_id:int, treenode_id:int) -> JsonResponse:
"""Retrieve the user ID of the user who imported the passed in treenode. If
this node wasn't imported, return None.
---
type:
importing_user:
description: ID of the importer of this node
type: integer
required: true
"""
importing_user_id = _importing_user(int(project_id), int(treenode_id))
return JsonResponse({
'importing_user_id': importing_user_id,
})
|
tomka/CATMAID
|
django/applications/catmaid/control/treenode.py
|
Python
|
gpl-3.0
| 53,294
|
# Copyright (C) 2008-2010 Adam Olsen
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
#
# The developers of the Exaile media player hereby grant permission
# for non-GPL compatible GStreamer and Exaile plugins to be used and
# distributed together with GStreamer and Exaile. This permission is
# above and beyond the permissions granted by the GPL license by which
# Exaile is covered. If you modify this code, you may extend this
# exception to your version of the code, but you are not obligated to
# do so. If you do not wish to do so, delete this exception statement
# from your version.
import gio
import glib
import gobject
import gtk
import xl.radio, xl.playlist
from xl import (
event,
common,
settings,
trax
)
from xl.nls import gettext as _
import xlgui.panel.playlists as playlistpanel
from xlgui.panel import menus
from xlgui import (
guiutil,
icons,
panel
)
from xlgui.widgets.common import DragTreeView
from xlgui.widgets import dialogs
class RadioException(Exception): pass
class ConnectionException(RadioException): pass
class RadioPanel(panel.Panel, playlistpanel.BasePlaylistPanelMixin):
"""
The Radio Panel
"""
__gsignals__ = {
'playlist-selected': (gobject.SIGNAL_RUN_LAST, None, (object,)),
'append-items': (gobject.SIGNAL_RUN_LAST, None, (object, bool)),
'replace-items': (gobject.SIGNAL_RUN_LAST, None, (object,)),
'queue-items': (gobject.SIGNAL_RUN_LAST, None, (object,)),
}
__gsignals__.update(playlistpanel.BasePlaylistPanelMixin._gsignals_)
ui_info = ('radio.ui', 'RadioPanelWindow')
_radiopanel = None
def __init__(self, parent, collection,
radio_manager, station_manager, name):
"""
Initializes the radio panel
"""
panel.Panel.__init__(self, parent, name)
playlistpanel.BasePlaylistPanelMixin.__init__(self)
self.collection = collection
self.manager = radio_manager
self.playlist_manager = station_manager
self.nodes = {}
self.load_nodes = {}
self.complete_reload = {}
self.loaded_nodes = []
self._setup_tree()
self._setup_widgets()
self.playlist_image = icons.MANAGER.pixbuf_from_icon_name(
'music-library', gtk.ICON_SIZE_SMALL_TOOLBAR)
# menus
self.playlist_menu = menus.RadioPanelPlaylistMenu(self)
self.track_menu = menus.TrackPanelMenu(self)
self._connect_events()
self.load_streams()
RadioPanel._radiopanel = self
def load_streams(self):
"""
Loads radio streams from plugins
"""
for name in self.playlist_manager.playlists:
pl = self.playlist_manager.get_playlist(name)
if pl is not None:
self.playlist_nodes[pl] = self.model.append(self.custom,
[self.playlist_image,
pl.name, pl])
self._load_playlist_nodes(pl)
self.tree.expand_row(self.model.get_path(self.custom), False)
for name, value in self.manager.stations.iteritems():
self.add_driver(value)
def _add_driver_cb(self, type, object, driver):
glib.idle_add(self.add_driver, driver)
def add_driver(self, driver):
"""
Adds a driver to the radio panel
"""
node = self.model.append(self.radio_root, [self.folder, str(driver), driver])
self.nodes[driver] = node
self.load_nodes[driver] = self.model.append(node, [self.refresh_image,
_('Loading streams...'), None])
self.tree.expand_row(self.model.get_path(self.radio_root), False)
if settings.get_option('gui/radio/%s_station_expanded' %
driver.name, False):
self.tree.expand_row(self.model.get_path(node), False)
def _remove_driver_cb(self, type, object, driver):
glib.idle_add(self.remove_driver, driver)
def remove_driver(self, driver):
"""
Removes a driver from the radio panel
"""
if driver in self.nodes:
self.model.remove(self.nodes[driver])
del self.nodes[driver]
def _setup_widgets(self):
"""
Sets up the various widgets required for this panel
"""
self.status = self.builder.get_object('status_label')
@guiutil.idle_add()
def _set_status(self, message, timeout=0):
self.status.set_text(message)
if timeout:
glib.timeout_add_seconds(timeout, self._set_status, '', 0)
def _connect_events(self):
"""
Connects events used in this panel
"""
self.builder.connect_signals({
'on_add_button_clicked': self._on_add_button_clicked,
})
self.tree.connect('row-expanded', self.on_row_expand)
self.tree.connect('row-collapsed', self.on_collapsed)
self.tree.connect('row-activated', self.on_row_activated)
self.tree.connect('key-release-event', self.on_key_released)
event.add_callback(self._add_driver_cb, 'station_added',
self.manager)
event.add_callback(self._remove_driver_cb, 'station_removed',
self.manager)
def _on_add_button_clicked(self, *e):
dialog = dialogs.MultiTextEntryDialog(self.parent,
_("Add Radio Station"))
dialog.add_field(_("Name:"))
url_field = dialog.add_field(_("URL:"))
clipboard = gtk.clipboard_get()
text = clipboard.wait_for_text()
if text is not None:
location = gio.File(uri=text)
if location.get_uri_scheme() is not None:
url_field.set_text(text)
result = dialog.run()
dialog.hide()
if result == gtk.RESPONSE_OK:
(name, uri) = dialog.get_values()
self._do_add_playlist(name, uri)
@common.threaded
def _do_add_playlist(self, name, uri):
from xl import playlist, trax
if playlist.is_valid_playlist(uri):
pl = playlist.import_playlist(uri)
pl.name = name
else:
pl = playlist.Playlist(name)
tracks = trax.get_tracks_from_uri(uri)
pl.extend(tracks)
self.playlist_manager.save_playlist(pl)
self._add_to_tree(pl)
@guiutil.idle_add()
def _add_to_tree(self, pl):
self.playlist_nodes[pl] = self.model.append(self.custom,
[self.playlist_image, pl.name, pl])
self._load_playlist_nodes(pl)
def _setup_tree(self):
"""
Sets up the tree that displays the radio panel
"""
box = self.builder.get_object('RadioPanel')
self.tree = playlistpanel.PlaylistDragTreeView(self, True, True)
self.tree.set_headers_visible(False)
self.targets = [('text/uri-list', 0, 0)]
# columns
text = gtk.CellRendererText()
if settings.get_option('gui/ellipsize_text_in_panels', False):
import pango
text.set_property( 'ellipsize-set', True)
text.set_property( 'ellipsize', pango.ELLIPSIZE_END)
icon = gtk.CellRendererPixbuf()
col = gtk.TreeViewColumn('radio')
col.pack_start(icon, False)
col.pack_start(text, True)
col.set_attributes(icon, pixbuf=0)
col.set_cell_data_func(text, self.cell_data_func)
self.tree.append_column(col)
self.model = gtk.TreeStore(gtk.gdk.Pixbuf, str, object)
self.tree.set_model(self.model)
self.track = icons.MANAGER.pixbuf_from_icon_name(
'audio-x-generic', gtk.ICON_SIZE_SMALL_TOOLBAR)
self.folder = self.tree.render_icon(
gtk.STOCK_DIRECTORY, gtk.ICON_SIZE_SMALL_TOOLBAR)
self.refresh_image = icons.MANAGER.pixbuf_from_stock(
gtk.STOCK_REFRESH)
self.custom = self.model.append(None, [self.folder, _("Saved Stations"), None])
self.radio_root = self.model.append(None, [self.folder, _("Radio "
"Streams"), None])
scroll = gtk.ScrolledWindow()
scroll.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)
scroll.add(self.tree)
scroll.set_shadow_type(gtk.SHADOW_IN)
box.pack_start(scroll, True, True)
def on_row_activated(self, tree, path, column):
item = self.model[path][2]
if isinstance(item, xl.radio.RadioItem):
self.emit('playlist-selected', item.get_playlist())
elif isinstance(item, playlistpanel.TrackWrapper):
self.emit('playlist-selected', item.playlist)
elif isinstance(item, xl.playlist.Playlist):
self.open_station(item)
def open_station(self, playlist):
"""
Opens a saved station
"""
self.emit('playlist-selected', playlist)
def get_menu(self):
"""
Returns the menu that all radio stations use
"""
menu = guiutil.Menu()
menu.append(_("Refresh"), self.on_reload, gtk.STOCK_REFRESH)
return menu
def on_key_released(self, widget, event):
"""
Called when a key is released in the tree
"""
if event.keyval == gtk.keysyms.Menu:
(mods,paths) = self.tree.get_selection().get_selected_rows()
if paths and paths[0]:
iter = self.model.get_iter(paths[0])
item = self.model.get_value(iter, 2)
if isinstance(item, (xl.radio.RadioStation, xl.radio.RadioList,
xl.radio.RadioItem)):
if isinstance(item, xl.radio.RadioStation):
station = item
else:
station = item.station
if station and hasattr(station, 'get_menu'):
menu = station.get_menu(self)
gtk.Menu.popup(menu, None, None, None, 0, event.time)
elif isinstance(item, xl.playlist.Playlist):
gtk.Menu.popup(self.playlist_menu, None, None, None, 0, event.time)
elif isinstance(item, playlistpanel.TrackWrapper):
gtk.Menu.popup(self.track_menu, None, None, None, 0, event.time)
return True
if event.keyval == gtk.keysyms.Left:
(mods,paths) = self.tree.get_selection().get_selected_rows()
if paths and paths[0]:
self.tree.collapse_row(paths[0])
return True
if event.keyval == gtk.keysyms.Right:
(mods,paths) = self.tree.get_selection().get_selected_rows()
if paths and paths[0]:
self.tree.expand_row(paths[0], False)
return True
return False
def button_release(self, widget, event):
"""
Called when someone clicks on the tree
"""
if event.button == 3:
(x, y) = map(int, event.get_coords())
path = self.tree.get_path_at_pos(x, y)
if path:
iter = self.model.get_iter(path[0])
item = self.model.get_value(iter, 2)
if isinstance(item, (xl.radio.RadioStation, xl.radio.RadioList,
xl.radio.RadioItem)):
if isinstance(item, xl.radio.RadioStation):
station = item
else:
station = item.station
if station and hasattr(station, 'get_menu'):
menu = station.get_menu(self)
menu.popup(None, None, None, event.button, event.time)
elif isinstance(item, xl.playlist.Playlist):
self.playlist_menu.popup(event)
elif isinstance(item, playlistpanel.TrackWrapper):
self.track_menu.popup(event)
def cell_data_func(self, column, cell, model, iter):
"""
Called when the tree needs a value for column 1
"""
object = model.get_value(iter, 1)
cell.set_property('text', str(object))
def drag_data_received(self, tv, context, x, y, selection, info, etime):
"""
Called when someone drags some thing onto the playlist panel
"""
#if the drag originated from radio view deny it
#TODO this might change if we are allowed to change the order of radio
if tv == context.get_source_widget():
context.drop_finish(False, etime)
return
locs = list(selection.get_uris())
path = self.tree.get_path_at_pos(x, y)
if path:
# Add whatever we received to the playlist at path
iter = self.model.get_iter(path[0])
current_playlist = self.model.get_value(iter, 2)
# if it's a track that we've dragged to, get the parent
if isinstance(current_playlist, playlistpanel.TrackWrapper):
current_playlist = current_playlist.playlist
elif not isinstance(current_playlist, xl.playlist.Playlist):
self._add_new_station(locs)
return
(tracks, playlists) = self.tree.get_drag_data(locs)
current_playlist.extend(tracks)
# Do we save in the case when a user drags a file onto a playlist in the playlist panel?
# note that the playlist does not have to be open for this to happen
self.playlist_manager.save_playlist(current_playlist, overwrite=True)
self._load_playlist_nodes(current_playlist)
else:
self._add_new_station(locs)
def _add_new_station(self, locs):
"""
Add a new station
"""
# If the user dragged files prompt for a new playlist name
# else if they dragged a playlist add the playlist
#We don't want the tracks in the playlists to be added to the
# master tracks list so we pass in False
(tracks, playlists) = self.tree.get_drag_data(locs, False)
#First see if they dragged any playlist files
for new_playlist in playlists:
self.model.append(self.custom, [self.playlist_image,
new_playlist.name, new_playlist])
# We are adding a completely new playlist with tracks so we save it
self.playlist_manager.save_playlist(new_playlist, overwrite=True)
#After processing playlist proceed to ask the user for the
#name of the new playlist to add and add the tracks to it
if len(tracks) > 0:
dialog = dialogs.TextEntryDialog(
_("Enter the name you want for your new playlist"),
_("New Playlist"))
result = dialog.run()
if result == gtk.RESPONSE_OK:
name = dialog.get_value()
if not name == "":
#Create the playlist from all of the tracks
new_playlist = xl.playlist.Playlist(name)
new_playlist.extend(tracks)
self.playlist_nodes[new_playlist] = self.model.append(self.custom,
[self.playlist_image,
new_playlist.name, new_playlist])
self.tree.expand_row(self.model.get_path(self.custom), False)
# We are adding a completely new playlist with tracks so we save it
self.playlist_manager.save_playlist(new_playlist)
self._load_playlist_nodes(new_playlist)
def drag_get_data(self, tv, context, selection_data, info, time):
"""
Called when the user drags a playlist from the radio panel
"""
tracks = self.tree.get_selected_tracks()
if not tracks:
return
for track in tracks:
DragTreeView.dragged_data[track.get_loc_for_io()] = track
uris = trax.util.get_uris_from_tracks(tracks)
selection_data.set_uris(uris)
def drag_data_delete(self, *e):
"""
stub
"""
pass
def on_reload(self, *e):
"""
Called when the refresh button is clicked
"""
selection = self.tree.get_selection()
info = selection.get_selected_rows()
if not info: return
(model, paths) = info
iter = self.model.get_iter(paths[0])
object = self.model.get_value(iter, 2)
try:
self.loaded_nodes.remove(self.nodes[object])
except ValueError: pass
if isinstance(object, (xl.radio.RadioList, xl.radio.RadioStation)):
self._clear_node(iter)
self.load_nodes[object] = self.model.append(iter,
[self.refresh_image, _("Loading streams..."), None])
self.complete_reload[object] = True
self.tree.expand_row(self.model.get_path(iter), False)
@staticmethod
def set_station_expanded_value(station, value):
settings.set_option(
'gui/radio/%s_station_expanded' % station,
            value,
)
def on_row_expand(self, tree, iter, path):
"""
Called when a user expands a row in the tree
"""
driver = self.model.get_value(iter, 2)
if not isinstance(driver, xl.playlist.Playlist):
self.model.set_value(iter, 0, self.folder)
if isinstance(driver, xl.radio.RadioStation) or \
isinstance(driver, xl.radio.RadioList):
if not self.nodes[driver] in self.loaded_nodes:
self._load_station(iter, driver)
if isinstance(driver, xl.radio.RadioStation):
self.set_station_expanded_value(driver.name, True)
def on_collapsed(self, tree, iter, path):
"""
Called when someone collapses a tree item
"""
driver = self.model.get_value(iter, 2)
if not isinstance(driver, xl.playlist.Playlist):
self.model.set_value(iter, 0, self.folder)
if isinstance(driver, xl.radio.RadioStation):
self.set_station_expanded_value(driver.name, False)
@common.threaded
def _load_station(self, iter, driver):
"""
Loads a radio station
"""
lists = None
no_cache = False
if driver in self.complete_reload:
no_cache = True
del self.complete_reload[driver]
if isinstance(driver, xl.radio.RadioStation):
try:
lists = driver.get_lists(no_cache=no_cache)
except RadioException, e:
self._set_status(str(e), 2)
else:
try:
lists = driver.get_items(no_cache=no_cache)
except RadioException, e:
self._set_status(str(e), 2)
if not lists: return
glib.idle_add(self._done_loading, iter, driver, lists)
def _done_loading(self, iter, object, items):
"""
Called when an item is done loading. Adds items to the tree
"""
self.loaded_nodes.append(self.nodes[object])
for item in items:
if isinstance(item, xl.radio.RadioList):
node = self.model.append(self.nodes[object], [self.folder, item.name, item])
self.nodes[item] = node
self.load_nodes[item] = self.model.append(node, [self.refresh_image,
_("Loading streams..."), None])
else:
self.model.append(self.nodes[object], [self.track, item.name,
item])
try:
self.model.remove(self.load_nodes[object])
del self.load_nodes[object]
except KeyError: pass
def _clear_node(self, node):
"""
Clears a node of all children
"""
remove = []
iter = self.model.iter_children(node)
while iter:
remove.append(iter)
iter = self.model.iter_next(iter)
for row in remove:
self.model.remove(row)
def set_status(message, timeout=0):
RadioPanel._radiopanel._set_status(message, timeout)
|
eri-trabiccolo/exaile
|
xlgui/panel/radio.py
|
Python
|
gpl-2.0
| 20,745
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2012-2013 Vinay Sajip.
# Licensed to the Python Software Foundation under a contributor agreement.
# See LICENSE.txt and CONTRIBUTORS.txt.
#
import logging
__version__ = '0.1.6'
class DistlibException(Exception):
pass
try:
from logging import NullHandler
except ImportError: # pragma: no cover
class NullHandler(logging.Handler):
def handle(self, record): pass
def emit(self, record): pass
logger = logging.getLogger(__name__)
logger.addHandler(NullHandler())
|
minrk/pip
|
pip/_vendor/distlib/__init__.py
|
Python
|
mit
| 534
|
"""
If Salt's OS detection does not identify a different virtual service module,
the minion will fall back to using this basic module, which simply wraps
sysvinit scripts.
"""
import fnmatch
import os
import re
__func_alias__ = {"reload_": "reload"}
_GRAINMAP = {"Arch": "/etc/rc.d", "Arch ARM": "/etc/rc.d"}
def __virtual__():
"""
Only work on systems which exclusively use sysvinit
"""
# Disable on these platforms, specific service modules exist:
disable = {
"RedHat",
"CentOS",
"Amazon",
"ScientificLinux",
"CloudLinux",
"Fedora",
"Gentoo",
"Ubuntu",
"Debian",
"Devuan",
"ALT",
"OEL",
"Linaro",
"elementary OS",
"McAfee OS Server",
"Raspbian",
"SUSE",
"Slackware",
}
if __grains__.get("os") in disable:
return (False, "Your OS is on the disabled list")
# Disable on all non-Linux OSes as well
if __grains__["kernel"] != "Linux":
return (False, "Non Linux OSes are not supported")
init_grain = __grains__.get("init")
if init_grain not in (None, "sysvinit", "unknown"):
return (False, "Minion is running {}".format(init_grain))
elif __utils__["systemd.booted"](__context__):
# Should have been caught by init grain check, but check just in case
return (False, "Minion is running systemd")
return "service"
def run(name, action):
"""
Run the specified service with an action.
.. versionadded:: 2015.8.1
name
Service name.
action
Action name (like start, stop, reload, restart).
CLI Example:
.. code-block:: bash
salt '*' service.run apache2 reload
salt '*' service.run postgresql initdb
"""
cmd = (
os.path.join(_GRAINMAP.get(__grains__.get("os"), "/etc/init.d"), name)
+ " "
+ action
)
return not __salt__["cmd.retcode"](cmd, python_shell=False)
def start(name):
"""
Start the specified service
CLI Example:
.. code-block:: bash
salt '*' service.start <service name>
"""
return run(name, "start")
def stop(name):
"""
Stop the specified service
CLI Example:
.. code-block:: bash
salt '*' service.stop <service name>
"""
return run(name, "stop")
def restart(name):
"""
Restart the specified service
CLI Example:
.. code-block:: bash
salt '*' service.restart <service name>
"""
return run(name, "restart")
def status(name, sig=None):
"""
Return the status for a service.
If the name contains globbing, a dict mapping service name to PID or empty
string is returned.
.. versionchanged:: 2018.3.0
The service name can now be a glob (e.g. ``salt*``)
Args:
name (str): The name of the service to check
sig (str): Signature to use to find the service via ps
Returns:
string: PID if running, empty otherwise
dict: Maps service name to PID if running, empty string otherwise
CLI Example:
.. code-block:: bash
salt '*' service.status <service name> [service signature]
"""
if sig:
return __salt__["status.pid"](sig)
contains_globbing = bool(re.search(r"\*|\?|\[.+\]", name))
if contains_globbing:
services = fnmatch.filter(get_all(), name)
else:
services = [name]
results = {}
for service in services:
results[service] = __salt__["status.pid"](service)
if contains_globbing:
return results
return results[name]
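# Illustrative sketch, not part of the original module (service names are
# hypothetical; this module only runs inside Salt's loader):
#
#   status('apache2')  ->  '1234'                 # plain PID string
#   status('apache2')  ->  ''                     # not running
#   status('apa*')     ->  {'apache2': '1234'}    # glob returns a dict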
def reload_(name):
"""
Refreshes config files by calling service reload. Does not perform a full
restart.
CLI Example:
.. code-block:: bash
salt '*' service.reload <service name>
"""
return run(name, "reload")
def available(name):
"""
Returns ``True`` if the specified service is available, otherwise returns
``False``.
CLI Example:
.. code-block:: bash
salt '*' service.available sshd
"""
return name in get_all()
def missing(name):
"""
The inverse of service.available.
Returns ``True`` if the specified service is not available, otherwise returns
``False``.
CLI Example:
.. code-block:: bash
salt '*' service.missing sshd
"""
return name not in get_all()
def get_all():
"""
Return a list of all available services
CLI Example:
.. code-block:: bash
salt '*' service.get_all
"""
if not os.path.isdir(_GRAINMAP.get(__grains__.get("os"), "/etc/init.d")):
return []
return sorted(os.listdir(_GRAINMAP.get(__grains__.get("os"), "/etc/init.d")))
|
saltstack/salt
|
salt/modules/linux_service.py
|
Python
|
apache-2.0
| 4,753
|
#!/usr/bin/env python
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai
__license__ = 'GPL v3'
__copyright__ = '2010, Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
import os, posixpath
from functools import partial
from PyQt4.Qt import (QMenu, Qt, QInputDialog, QToolButton, QDialog,
QDialogButtonBox, QGridLayout, QLabel, QLineEdit, QIcon, QSize,
QCoreApplication, pyqtSignal)
from calibre import isbytestring, sanitize_file_name_unicode
from calibre.constants import (filesystem_encoding, iswindows,
get_portable_base)
from calibre.utils.config import prefs
from calibre.gui2 import (gprefs, warning_dialog, Dispatcher, error_dialog,
question_dialog, info_dialog, open_local_file, choose_dir)
from calibre.library.database2 import LibraryDatabase2
from calibre.gui2.actions import InterfaceAction
class LibraryUsageStats(object): # {{{
def __init__(self):
self.stats = {}
self.read_stats()
base = get_portable_base()
if base is not None:
lp = prefs['library_path']
if lp:
# Rename the current library. Renaming of other libraries is
# handled by the switch function
q = os.path.basename(lp)
for loc in list(self.stats.iterkeys()):
bn = posixpath.basename(loc)
if bn.lower() == q.lower():
self.rename(loc, lp)
def read_stats(self):
stats = gprefs.get('library_usage_stats', {})
self.stats = stats
def write_stats(self):
locs = list(self.stats.keys())
locs.sort(cmp=lambda x, y: cmp(self.stats[x], self.stats[y]),
reverse=True)
for key in locs[25:]:
self.stats.pop(key)
gprefs.set('library_usage_stats', self.stats)
def remove(self, location):
self.stats.pop(location, None)
self.write_stats()
def canonicalize_path(self, lpath):
if isbytestring(lpath):
lpath = lpath.decode(filesystem_encoding)
lpath = lpath.replace(os.sep, '/')
return lpath
def library_used(self, db):
lpath = self.canonicalize_path(db.library_path)
if lpath not in self.stats:
self.stats[lpath] = 0
self.stats[lpath] += 1
self.write_stats()
def locations(self, db):
lpath = self.canonicalize_path(db.library_path)
locs = list(self.stats.keys())
if lpath in locs:
locs.remove(lpath)
locs.sort(cmp=lambda x, y: cmp(self.stats[x], self.stats[y]),
reverse=True)
for loc in locs:
yield self.pretty(loc), loc
def pretty(self, loc):
if loc.endswith('/'):
loc = loc[:-1]
return loc.split('/')[-1]
def rename(self, location, newloc):
newloc = self.canonicalize_path(newloc)
stats = self.stats.pop(location, None)
if stats is not None:
self.stats[newloc] = stats
self.write_stats()
# }}}
class MovedDialog(QDialog): # {{{
def __init__(self, stats, location, parent=None):
QDialog.__init__(self, parent)
self.setWindowTitle(_('No library found'))
self._l = l = QGridLayout(self)
self.setLayout(l)
self.stats, self.location = stats, location
loc = self.oldloc = location.replace('/', os.sep)
self.header = QLabel(_('No existing calibre library was found at %s. '
'If the library was moved, select its new location below. '
'Otherwise calibre will forget this library.')%loc)
self.header.setWordWrap(True)
ncols = 2
l.addWidget(self.header, 0, 0, 1, ncols)
self.cl = QLabel('<br><b>'+_('New location of this library:'))
l.addWidget(self.cl, 1, 0, 1, ncols)
self.loc = QLineEdit(loc, self)
l.addWidget(self.loc, 2, 0, 1, 1)
self.cd = QToolButton(self)
self.cd.setIcon(QIcon(I('document_open.png')))
self.cd.clicked.connect(self.choose_dir)
l.addWidget(self.cd, 2, 1, 1, 1)
self.bb = QDialogButtonBox(self)
b = self.bb.addButton(_('Library moved'), self.bb.AcceptRole)
b.setIcon(QIcon(I('ok.png')))
b = self.bb.addButton(_('Forget library'), self.bb.RejectRole)
b.setIcon(QIcon(I('edit-clear.png')))
self.bb.accepted.connect(self.accept)
self.bb.rejected.connect(self.reject)
l.addWidget(self.bb, 3, 0, 1, ncols)
self.resize(self.sizeHint() + QSize(100, 50))
def choose_dir(self):
d = choose_dir(self, 'library moved choose new loc',
_('New library location'), default_dir=self.oldloc)
if d is not None:
self.loc.setText(d)
def reject(self):
self.stats.remove(self.location)
QDialog.reject(self)
def accept(self):
newloc = unicode(self.loc.text())
if not LibraryDatabase2.exists_at(newloc):
error_dialog(self, _('No library found'),
_('No existing calibre library found at %s')%newloc,
show=True)
return
self.stats.rename(self.location, newloc)
self.newloc = newloc
QDialog.accept(self)
# }}}
class ChooseLibraryAction(InterfaceAction):
name = 'Choose Library'
action_spec = (_('Choose Library'), 'lt.png',
_('Choose calibre library to work with'), None)
dont_add_to = frozenset(['context-menu-device'])
action_add_menu = True
action_menu_clone_qaction = _('Switch/create library...')
restore_view_state = pyqtSignal(object)
def genesis(self):
self.base_text = _('%d books')
self.count_changed(0)
self.qaction.triggered.connect(self.choose_library,
type=Qt.QueuedConnection)
self.action_choose = self.menuless_qaction
self.stats = LibraryUsageStats()
self.popup_type = (QToolButton.InstantPopup if len(self.stats.stats) > 1 else
QToolButton.MenuButtonPopup)
self.choose_menu = self.qaction.menu()
if not os.environ.get('CALIBRE_OVERRIDE_DATABASE_PATH', None):
self.choose_menu.addAction(self.action_choose)
self.quick_menu = QMenu(_('Quick switch'))
self.quick_menu_action = self.choose_menu.addMenu(self.quick_menu)
self.rename_menu = QMenu(_('Rename library'))
self.rename_menu_action = self.choose_menu.addMenu(self.rename_menu)
self.delete_menu = QMenu(_('Remove library'))
self.delete_menu_action = self.choose_menu.addMenu(self.delete_menu)
ac = self.create_action(spec=(_('Pick a random book'), 'random.png',
None, None), attr='action_pick_random')
ac.triggered.connect(self.pick_random)
self.choose_menu.addAction(ac)
self.rename_separator = self.choose_menu.addSeparator()
self.switch_actions = []
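        # Pre-create a fixed pool of five quick-switch actions; build_menus()
        # fills in the names of the most recently used libraries and hides any
        # unused slots.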
for i in range(5):
ac = self.create_action(spec=('', None, None, None),
attr='switch_action%d'%i)
self.switch_actions.append(ac)
ac.setVisible(False)
ac.triggered.connect(partial(self.qs_requested, i),
type=Qt.QueuedConnection)
self.choose_menu.addAction(ac)
self.rename_separator = self.choose_menu.addSeparator()
self.maintenance_menu = QMenu(_('Library Maintenance'))
ac = self.create_action(spec=(_('Library metadata backup status'),
'lt.png', None, None), attr='action_backup_status')
ac.triggered.connect(self.backup_status, type=Qt.QueuedConnection)
self.maintenance_menu.addAction(ac)
ac = self.create_action(spec=(_('Start backing up metadata of all books'),
'lt.png', None, None), attr='action_backup_metadata')
ac.triggered.connect(self.mark_dirty, type=Qt.QueuedConnection)
self.maintenance_menu.addAction(ac)
ac = self.create_action(spec=(_('Check library'), 'lt.png',
None, None), attr='action_check_library')
ac.triggered.connect(self.check_library, type=Qt.QueuedConnection)
self.maintenance_menu.addAction(ac)
ac = self.create_action(spec=(_('Restore database'), 'lt.png',
None, None),
attr='action_restore_database')
ac.triggered.connect(self.restore_database, type=Qt.QueuedConnection)
self.maintenance_menu.addAction(ac)
self.choose_menu.addMenu(self.maintenance_menu)
self.view_state_map = {}
self.restore_view_state.connect(self._restore_view_state,
type=Qt.QueuedConnection)
@property
def preserve_state_on_switch(self):
ans = getattr(self, '_preserve_state_on_switch', None)
if ans is None:
self._preserve_state_on_switch = ans = \
self.gui.library_view.preserve_state(require_selected_ids=False)
return ans
def pick_random(self, *args):
self.gui.iactions['Pick Random Book'].pick_random()
def library_name(self):
db = self.gui.library_view.model().db
path = db.library_path
if isbytestring(path):
path = path.decode(filesystem_encoding)
path = path.replace(os.sep, '/')
return self.stats.pretty(path)
def library_changed(self, db):
self.stats.library_used(db)
self.build_menus()
state = self.view_state_map.get(self.stats.canonicalize_path(
db.library_path), None)
if state is not None:
self.restore_view_state.emit(state)
def _restore_view_state(self, state):
self.preserve_state_on_switch.state = state
def initialization_complete(self):
self.library_changed(self.gui.library_view.model().db)
def build_menus(self):
if os.environ.get('CALIBRE_OVERRIDE_DATABASE_PATH', None):
return
db = self.gui.library_view.model().db
locations = list(self.stats.locations(db))
for ac in self.switch_actions:
ac.setVisible(False)
self.quick_menu.clear()
self.qs_locations = [i[1] for i in locations]
self.rename_menu.clear()
self.delete_menu.clear()
quick_actions, rename_actions, delete_actions = [], [], []
for name, loc in locations:
ac = self.quick_menu.addAction(name, Dispatcher(partial(self.switch_requested,
loc)))
quick_actions.append(ac)
ac = self.rename_menu.addAction(name, Dispatcher(partial(self.rename_requested,
name, loc)))
rename_actions.append(ac)
ac = self.delete_menu.addAction(name, Dispatcher(partial(self.delete_requested,
name, loc)))
delete_actions.append(ac)
qs_actions = []
for i, x in enumerate(locations[:len(self.switch_actions)]):
name, loc = x
ac = self.switch_actions[i]
ac.setText(name)
ac.setVisible(True)
qs_actions.append(ac)
self.quick_menu_action.setVisible(bool(locations))
self.rename_menu_action.setVisible(bool(locations))
self.delete_menu_action.setVisible(bool(locations))
self.gui.location_manager.set_switch_actions(quick_actions,
rename_actions, delete_actions, qs_actions,
self.action_choose)
def location_selected(self, loc):
enabled = loc == 'library'
self.qaction.setEnabled(enabled)
def rename_requested(self, name, location):
loc = location.replace('/', os.sep)
base = os.path.dirname(loc)
newname, ok = QInputDialog.getText(self.gui, _('Rename') + ' ' + name,
'<p>'+_('Choose a new name for the library <b>%s</b>. ')%name +
'<p>'+_('Note that the actual library folder will be renamed.'),
text=name)
newname = sanitize_file_name_unicode(unicode(newname))
if not ok or not newname or newname == name:
return
newloc = os.path.join(base, newname)
if os.path.exists(newloc):
return error_dialog(self.gui, _('Already exists'),
_('The folder %s already exists. Delete it first.') %
newloc, show=True)
if (iswindows and len(newloc) >
LibraryDatabase2.WINDOWS_LIBRARY_PATH_LIMIT):
return error_dialog(self.gui, _('Too long'),
_('Path to library too long. Must be less than'
' %d characters.')%LibraryDatabase2.WINDOWS_LIBRARY_PATH_LIMIT,
show=True)
try:
os.rename(loc, newloc)
except:
import traceback
error_dialog(self.gui, _('Rename failed'),
_('Failed to rename the library at %s. '
'The most common cause for this is if one of the files'
' in the library is open in another program.') % loc,
det_msg=traceback.format_exc(), show=True)
return
self.stats.rename(location, newloc)
self.build_menus()
self.gui.iactions['Copy To Library'].build_menus()
def delete_requested(self, name, location):
loc = location.replace('/', os.sep)
self.stats.remove(location)
self.build_menus()
self.gui.iactions['Copy To Library'].build_menus()
info_dialog(self.gui, _('Library removed'),
_('The library %s has been removed from calibre. '
'The files remain on your computer, if you want '
'to delete them, you will have to do so manually.') % loc,
show=True)
if os.path.exists(loc):
open_local_file(loc)
def backup_status(self, location):
try:
dirty_text = \
unicode(self.gui.library_view.model().db.dirty_queue_length())
except:
dirty_text = _('none')
info_dialog(self.gui, _('Backup status'), '<p>'+
_('Book metadata files remaining to be written: %s') % dirty_text,
show=True)
def mark_dirty(self):
db = self.gui.library_view.model().db
db.dirtied(list(db.data.iterallids()))
info_dialog(self.gui, _('Backup metadata'),
_('Metadata will be backed up while calibre is running, at the '
'rate of approximately 1 book every three seconds.'), show=True)
def restore_database(self):
m = self.gui.library_view.model()
db = m.db
if (iswindows and len(db.library_path) >
LibraryDatabase2.WINDOWS_LIBRARY_PATH_LIMIT):
return error_dialog(self.gui, _('Too long'),
_('Path to library too long. Must be less than'
' %d characters. Move your library to a location with'
' a shorter path using Windows Explorer, then point'
' calibre to the new location and try again.')%
LibraryDatabase2.WINDOWS_LIBRARY_PATH_LIMIT,
show=True)
from calibre.gui2.dialogs.restore_library import restore_database
m = self.gui.library_view.model()
m.stop_metadata_backup()
db = m.db
db.prefs.disable_setting = True
if restore_database(db, self.gui):
self.gui.library_moved(db.library_path, call_close=False)
def check_library(self):
from calibre.gui2.dialogs.check_library import CheckLibraryDialog, DBCheck
self.gui.library_view.save_state()
m = self.gui.library_view.model()
m.stop_metadata_backup()
db = m.db
db.prefs.disable_setting = True
d = DBCheck(self.gui, db)
d.start()
try:
d.conn.close()
except:
pass
d.break_cycles()
self.gui.library_moved(db.library_path, call_close=not
d.closed_orig_conn)
if d.rejected:
return
if d.error is None:
if not question_dialog(self.gui, _('Success'),
_('Found no errors in your calibre library database.'
' Do you want calibre to check if the files in your '
' library match the information in the database?')):
return
else:
return error_dialog(self.gui, _('Failed'),
_('Database integrity check failed, click Show details'
' for details.'), show=True, det_msg=d.error[1])
self.gui.status_bar.show_message(
_('Starting library scan, this may take a while'))
try:
QCoreApplication.processEvents()
d = CheckLibraryDialog(self.gui, m.db)
if not d.do_exec():
info_dialog(self.gui, _('No problems found'),
_('The files in your library match the information '
'in the database.'), show=True)
finally:
self.gui.status_bar.clear_message()
def look_for_portable_lib(self, db, location):
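        # On a portable calibre install the drive letter can change between
        # runs, so look for a library folder with the same basename under the
        # current portable base directory.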
base = get_portable_base()
if base is None:
return False, None
loc = location.replace('/', os.sep)
candidate = os.path.join(base, os.path.basename(loc))
if db.exists_at(candidate):
newloc = candidate.replace(os.sep, '/')
self.stats.rename(location, newloc)
return True, newloc
return False, None
def switch_requested(self, location):
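        # Snapshot the current library's view state, then resolve the target
        # location, falling back to the portable-install search and finally to
        # MovedDialog if the library cannot be found on disk.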
if not self.change_library_allowed():
return
db = self.gui.library_view.model().db
current_lib = self.stats.canonicalize_path(db.library_path)
self.view_state_map[current_lib] = self.preserve_state_on_switch.state
loc = location.replace('/', os.sep)
exists = db.exists_at(loc)
if not exists:
exists, new_location = self.look_for_portable_lib(db, location)
if exists:
location = new_location
loc = location.replace('/', os.sep)
if not exists:
d = MovedDialog(self.stats, location, self.gui)
ret = d.exec_()
self.build_menus()
self.gui.iactions['Copy To Library'].build_menus()
if ret == d.Accepted:
loc = d.newloc.replace('/', os.sep)
else:
return
#from calibre.utils.mem import memory
#import weakref
#from PyQt4.Qt import QTimer
#self.dbref = weakref.ref(self.gui.library_view.model().db)
#self.before_mem = memory()/1024**2
self.gui.library_moved(loc, allow_rebuild=True)
#QTimer.singleShot(5000, self.debug_leak)
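    # Developer-only helper for the commented-out block above: verifies that
    # the old database object is garbage collected after a library switch.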
def debug_leak(self):
import gc
from calibre.utils.mem import memory
ref = self.dbref
for i in xrange(3): gc.collect()
if ref() is not None:
print 'DB object alive:', ref()
for r in gc.get_referrers(ref())[:10]:
print r
print
print 'before:', self.before_mem
print 'after:', memory()/1024**2
print
self.dbref = self.before_mem = None
def qs_requested(self, idx, *args):
self.switch_requested(self.qs_locations[idx])
def count_changed(self, new_count):
text = self.base_text%new_count
a = self.qaction
a.setText(text)
tooltip = self.action_spec[2] + '\n\n' + text
a.setToolTip(tooltip)
a.setStatusTip(tooltip)
a.setWhatsThis(tooltip)
def choose_library(self, *args):
if not self.change_library_allowed():
return
from calibre.gui2.dialogs.choose_library import ChooseLibrary
self.gui.library_view.save_state()
db = self.gui.library_view.model().db
location = self.stats.canonicalize_path(db.library_path)
self.pre_choose_dialog_location = location
c = ChooseLibrary(db, self.choose_library_callback, self.gui)
c.exec_()
self.choose_dialog_library_renamed = getattr(c, 'library_renamed', False)
def choose_library_callback(self, newloc, copy_structure=False):
self.gui.library_moved(newloc, copy_structure=copy_structure,
allow_rebuild=True)
if getattr(self, 'choose_dialog_library_renamed', False):
self.stats.rename(self.pre_choose_dialog_location, prefs['library_path'])
self.build_menus()
self.gui.iactions['Copy To Library'].build_menus()
def change_library_allowed(self):
if os.environ.get('CALIBRE_OVERRIDE_DATABASE_PATH', None):
warning_dialog(self.gui, _('Not allowed'),
_('You cannot change libraries while using the environment'
' variable CALIBRE_OVERRIDE_DATABASE_PATH.'), show=True)
return False
if self.gui.job_manager.has_jobs():
warning_dialog(self.gui, _('Not allowed'),
_('You cannot change libraries while jobs'
' are running.'), show=True)
return False
return True
|
yeyanchao/calibre
|
src/calibre/gui2/actions/choose_library.py
|
Python
|
gpl-3.0
| 21,394
|
from os import path
from Components.HTMLComponent import HTMLComponent
from Components.GUIComponent import GUIComponent
from Screens.Screen import Screen
from Components.ActionMap import ActionMap
from Components.Label import Label
from ServiceReference import ServiceReference
from enigma import eListboxPythonMultiContent, eListbox, gFont, iServiceInformation, eServiceCenter, getDesktop, RT_HALIGN_LEFT, RT_VALIGN_CENTER
from Tools.Transponder import ConvertToHumanReadable, getChannelNumber, supportedChannels
import skin
RT_HALIGN_LEFT = 0
TYPE_TEXT = 0
TYPE_VALUE_HEX = 1
TYPE_VALUE_DEC = 2
TYPE_VALUE_HEX_DEC = 3
TYPE_SLIDER = 4
TYPE_VALUE_ORBIT_DEC = 5
def to_unsigned(x):
return x & 0xFFFFFFFF
def ServiceInfoListEntry(a, b, valueType=TYPE_TEXT, param=4):
screenwidth = getDesktop(0).size().width()
if not isinstance(b, str):
if valueType == TYPE_VALUE_HEX:
b = ("0x%0" + str(param) + "x") % to_unsigned(b)
elif valueType == TYPE_VALUE_DEC:
b = str(b)
elif valueType == TYPE_VALUE_HEX_DEC:
b = ("0x%0" + str(param) + "x (%dd)") % (to_unsigned(b), b)
elif valueType == TYPE_VALUE_ORBIT_DEC:
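			# Orbital positions are given in tenths of a degree east (0-3599);
			# values beyond 180.0 degrees are folded over to a west longitude.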
direction = 'E'
if b > 1800:
b = 3600 - b
direction = 'W'
b = "%d.%d%s" % (b // 10, b % 10, direction)
else:
b = str(b)
x, y, w, h = skin.parameters.get("ServiceInfo",(0, 0, 300, 30))
xa, ya, wa, ha = skin.parameters.get("ServiceInfoLeft",(0, 0, 300, 25))
xb, yb, wb, hb = skin.parameters.get("ServiceInfoRight",(300, 0, 600, 25))
return [
#PyObject *type, *px, *py, *pwidth, *pheight, *pfnt, *pstring, *pflags;
(eListboxPythonMultiContent.TYPE_TEXT, x, y, w, h, 0, RT_HALIGN_LEFT, ""),
(eListboxPythonMultiContent.TYPE_TEXT, xa, ya, wa, ha, 0, RT_HALIGN_LEFT, a),
(eListboxPythonMultiContent.TYPE_TEXT, xb, yb, wb, hb, 0, RT_HALIGN_LEFT, b)
]
class ServiceInfoList(HTMLComponent, GUIComponent):
def __init__(self, source):
GUIComponent.__init__(self)
self.l = eListboxPythonMultiContent()
self.list = source
self.l.setList(self.list)
self.fontName = "Regular"
self.fontSize = 23
self.ItemHeight = 25
def applySkin(self, desktop, screen):
if self.skinAttributes is not None:
attribs = [ ]
for (attrib, value) in self.skinAttributes:
if attrib == "font":
font = skin.parseFont(value, ((1,1),(1,1)))
self.fontName = font.family
self.fontSize = font.pointSize
elif attrib == "itemHeight":
self.ItemHeight = int(value)
else:
attribs.append((attrib,value))
self.skinAttributes = attribs
rc = GUIComponent.applySkin(self, desktop, screen)
self.setFontsize()
self.l.setItemHeight(self.ItemHeight)
return rc
GUI_WIDGET = eListbox
def setFontsize(self):
self.l.setFont(0, gFont(self.fontName, self.fontSize))
self.l.setFont(1, gFont(self.fontName, self.fontSize + 5))
def postWidgetCreate(self, instance):
self.instance.setContent(self.l)
self.setFontsize()
TYPE_SERVICE_INFO = 1
TYPE_TRANSPONDER_INFO = 2
class ServiceInfo(Screen):
def __init__(self, session, serviceref=None):
Screen.__init__(self, session)
Screen.setTitle(self, _("Service Information"))
self["actions"] = ActionMap(["OkCancelActions", "ColorActions"],
{
"ok": self.close,
"cancel": self.close,
"red": self.information,
"green": self.pids,
"yellow": self.transponder,
"blue": self.tuner
}, -1)
if serviceref:
self.type = TYPE_TRANSPONDER_INFO
self.skinName="ServiceInfoSimple"
info = eServiceCenter.getInstance().info(serviceref)
self.transponder_info = info.getInfoObject(serviceref, iServiceInformation.sTransponderData)
# info is a iStaticServiceInformation, not a iServiceInformation
self.info = None
self.feinfo = None
else:
self.type = TYPE_SERVICE_INFO
self["key_red"] = self["red"] = Label(_("Service"))
self["key_green"] = self["green"] = Label(_("PIDs"))
self["key_yellow"] = self["yellow"] = Label(_("Multiplex"))
self["key_blue"] = self["blue"] = Label(_("Tuner status"))
service = session.nav.getCurrentService()
if service is not None:
self.info = service.info()
self.feinfo = service.frontendInfo()
else:
self.info = None
self.feinfo = None
tlist = [ ]
self["infolist"] = ServiceInfoList(tlist)
self.onShown.append(self.information)
def information(self):
if self.type == TYPE_SERVICE_INFO:
if self.session.nav.getCurrentlyPlayingServiceOrGroup():
name = ServiceReference(self.session.nav.getCurrentlyPlayingServiceReference()).getServiceName()
refstr = self.session.nav.getCurrentlyPlayingServiceReference().toString()
else:
name = _("N/A")
refstr = _("N/A")
aspect = "-"
videocodec = "-"
videomode = "-"
resolution = "-"
if self.info:
videocodec = ("MPEG2", "MPEG4", "MPEG1", "MPEG4-II", "VC1", "VC1-SM", "-" )[self.info and self.info.getInfo(iServiceInformation.sVideoType)]
video_height = 0
video_width = 0
video_pol = " "
video_rate = 0
if path.exists("/proc/stb/vmpeg/0/yres"):
f = open("/proc/stb/vmpeg/0/yres", "r")
try:
video_height = int(f.read(),16)
except:
pass
f.close()
if path.exists("/proc/stb/vmpeg/0/xres"):
f = open("/proc/stb/vmpeg/0/xres", "r")
try:
video_width = int(f.read(),16)
except:
pass
f.close()
if path.exists("/proc/stb/vmpeg/0/progressive"):
f = open("/proc/stb/vmpeg/0/progressive", "r")
try:
video_pol = "p" if int(f.read(),16) else "i"
except:
pass
f.close()
if path.exists("/proc/stb/vmpeg/0/framerate"):
f = open("/proc/stb/vmpeg/0/framerate", "r")
try:
video_rate = int(f.read())
except:
pass
f.close()
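				# The framerate appears to be reported in millihertz; adding 500
				# before the integer division rounds to the nearest whole fps.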
fps = str((video_rate + 500) / 1000)
resolution = str(video_width) + "x" + str(video_height) + video_pol + fps
aspect = self.getServiceInfoValue(iServiceInformation.sAspect)
if aspect in ( 1, 2, 5, 6, 9, 0xA, 0xD, 0xE ):
aspect = "4:3"
else:
aspect = "16:9"
f = open("/proc/stb/video/videomode")
videomode = f.read()[:-1].replace('\n','')
f.close()
Labels = ( (_("Name"), name, TYPE_TEXT),
(_("Provider"), self.getServiceInfoValue(iServiceInformation.sProvider), TYPE_TEXT),
(_("Videoformat"), aspect, TYPE_TEXT),
(_("Videomode"), videomode, TYPE_TEXT),
(_("Videosize"), resolution, TYPE_TEXT),
(_("Videocodec"), videocodec, TYPE_TEXT),
(_("Namespace"), self.getServiceInfoValue(iServiceInformation.sNamespace), TYPE_VALUE_HEX, 8),
(_("Service reference"), refstr, TYPE_TEXT))
self.fillList(Labels)
else:
if self.transponder_info:
tp_info = ConvertToHumanReadable(self.transponder_info)
conv = { "tuner_type" : _("Transponder type"),
"system" : _("System"),
"modulation" : _("Modulation"),
"orbital_position" : _("Orbital position"),
"frequency" : _("Frequency"),
"symbol_rate" : _("Symbol rate"),
"bandwidth" : _("Bandwidth"),
"polarization" : _("Polarization"),
"inversion" : _("Inversion"),
"pilot" : _("Pilot"),
"rolloff" : _("Roll-off"),
"fec_inner" : _("FEC"),
"code_rate_lp" : _("Coderate LP"),
"code_rate_hp" : _("Coderate HP"),
"constellation" : _("Constellation"),
"transmission_mode": _("Transmission mode"),
"guard_interval" : _("Guard interval"),
"hierarchy_information": _("Hierarchy information") }
Labels = [(conv[i], tp_info[i], i == "orbital_position" and TYPE_VALUE_ORBIT_DEC or TYPE_VALUE_DEC) for i in tp_info.keys() if i in conv]
self.fillList(Labels)
def pids(self):
if self.type == TYPE_SERVICE_INFO:
Labels = ( (_("Video PID"), self.getServiceInfoValue(iServiceInformation.sVideoPID), TYPE_VALUE_HEX_DEC, 4),
(_("Audio PID"), self.getServiceInfoValue(iServiceInformation.sAudioPID), TYPE_VALUE_HEX_DEC, 4),
(_("PCR PID"), self.getServiceInfoValue(iServiceInformation.sPCRPID), TYPE_VALUE_HEX_DEC, 4),
(_("PMT PID"), self.getServiceInfoValue(iServiceInformation.sPMTPID), TYPE_VALUE_HEX_DEC, 4),
(_("TXT PID"), self.getServiceInfoValue(iServiceInformation.sTXTPID), TYPE_VALUE_HEX_DEC, 4),
(_("TSID"), self.getServiceInfoValue(iServiceInformation.sTSID), TYPE_VALUE_HEX_DEC, 4),
(_("ONID"), self.getServiceInfoValue(iServiceInformation.sONID), TYPE_VALUE_HEX_DEC, 4),
(_("SID"), self.getServiceInfoValue(iServiceInformation.sSID), TYPE_VALUE_HEX_DEC, 4))
self.fillList(Labels)
def showFrontendData(self, real):
if self.type == TYPE_SERVICE_INFO:
frontendData = self.feinfo and self.feinfo.getAll(real)
Labels = self.getFEData(frontendData)
self.fillList(Labels)
def transponder(self):
if self.type == TYPE_SERVICE_INFO:
self.showFrontendData(True)
def tuner(self):
if self.type == TYPE_SERVICE_INFO:
self.showFrontendData(False)
def getFEData(self, frontendDataOrg):
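		# Convert the raw driver values to human-readable strings once, then
		# build the label/value rows appropriate for the tuner type.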
if frontendDataOrg and len(frontendDataOrg):
frontendData = ConvertToHumanReadable(frontendDataOrg)
if frontendDataOrg["tuner_type"] == "DVB-S":
return ((_("NIM"), chr(ord('A') + frontendData["tuner_number"]), TYPE_TEXT),
(_("Type"), frontendData["tuner_type"], TYPE_TEXT),
(_("System"), frontendData["system"], TYPE_TEXT),
(_("Modulation"), frontendData["modulation"], TYPE_TEXT),
(_("Orbital position"), frontendData["orbital_position"], TYPE_VALUE_DEC),
(_("Frequency"), frontendData["frequency"], TYPE_VALUE_DEC),
(_("Symbol rate"), frontendData["symbol_rate"], TYPE_VALUE_DEC),
(_("Polarization"), frontendData["polarization"], TYPE_TEXT),
(_("Inversion"), frontendData["inversion"], TYPE_TEXT),
(_("FEC"), frontendData["fec_inner"], TYPE_TEXT),
(_("Pilot"), frontendData.get("pilot", None), TYPE_TEXT),
(_("Roll-off"), frontendData.get("rolloff", None), TYPE_TEXT))
elif frontendDataOrg["tuner_type"] == "DVB-C":
return ((_("NIM"), chr(ord('A') + frontendData["tuner_number"]), TYPE_TEXT),
(_("Type"), frontendData["tuner_type"], TYPE_TEXT),
(_("Modulation"), frontendData["modulation"], TYPE_TEXT),
(_("Frequency"), frontendData["frequency"], TYPE_VALUE_DEC),
(_("Symbol rate"), frontendData["symbol_rate"], TYPE_VALUE_DEC),
(_("Inversion"), frontendData["inversion"], TYPE_TEXT),
(_("FEC"), frontendData["fec_inner"], TYPE_TEXT))
elif frontendDataOrg["tuner_type"] == "DVB-T":
channel = getChannelNumber(frontendDataOrg["frequency"], frontendDataOrg["tuner_number"]) if supportedChannels(frontendDataOrg["tuner_number"]) else None
return ((_("NIM"), chr(ord('A') + frontendData["tuner_number"]), TYPE_TEXT),
(_("Type"), frontendData["tuner_type"], TYPE_TEXT),
(_("Frequency"), frontendData["frequency"], TYPE_VALUE_DEC),
(_("Channel"), getChannelNumber(frontendData["frequency"], frontendData["tuner_number"]), TYPE_VALUE_DEC),
(_("Inversion"), frontendData["inversion"], TYPE_TEXT),
(_("Bandwidth"), frontendData["bandwidth"], TYPE_VALUE_DEC),
(_("Code rate LP"), frontendData["code_rate_lp"], TYPE_TEXT),
(_("Code rate HP"), frontendData["code_rate_hp"], TYPE_TEXT),
(_("Constellation"), frontendData["constellation"], TYPE_TEXT),
(_("Transmission mode"), frontendData["transmission_mode"], TYPE_TEXT),
(_("Guard interval"), frontendData["guard_interval"], TYPE_TEXT),
(_("Hierarchy info"), frontendData["hierarchy_information"], TYPE_TEXT))
return [ ]
def fillList(self, Labels):
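		# Entries whose value is None are skipped, so callers can pass
		# optional fields (e.g. pilot/roll-off) unconditionally.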
tlist = [ ]
for item in Labels:
if item[1] is None:
continue
value = item[1]
if len(item) < 4:
tlist.append(ServiceInfoListEntry(item[0]+":", value, item[2]))
else:
tlist.append(ServiceInfoListEntry(item[0]+":", value, item[2], item[3]))
self["infolist"].l.setList(tlist)
def getServiceInfoValue(self, what):
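		# getInfo() uses sentinel values: -2 means the value is only available
		# as a string, -1 means it is not available at all.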
if self.info is None:
return ""
v = self.info.getInfo(what)
if v == -2:
v = self.info.getInfoString(what)
elif v == -1:
v = _("N/A")
return v
|
vitmod/enigma2-1
|
lib/python/Screens/ServiceInfo.py
|
Python
|
gpl-2.0
| 12,007
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
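# Module manifest; in this OpenERP series 'init_xml' files are loaded once at
# install time while 'update_xml' files are re-applied on every module update.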
{
'name': 'Signature on User',
'version': '2.0.0.0',
'category': 'Base',
'description': """
Add signature on res.user
""",
'author': 'Didotech SRL',
'website': 'http://www.didotech.com',
'depends': ['base'],
'init_xml': ['res_user_view.xml'],
'update_xml': ['res_user_view.xml'],
'demo_xml': [],
'test':[],
'installable': True,
'active': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
dhp-denero/LibrERP
|
res_user_signature/__openerp__.py
|
Python
|
agpl-3.0
| 1,454
|
import pickle
import os
import re
from models import Serializable, Category, PatternInfo
from views import FuseView, CrawlerView, AgarView, GrowingSpaceshipView, WaveView, GunView, InductionCoilView, \
MethuselahView, MovingBreederView, OscillatorView, MiscPatternView, PufferView, ReflectorView, RotorView, \
SawtoothView, SpaceshipView, StillLifeView, UnitCellView, WickView, WickstretcherView
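# Maps each scraped category name to the view class that serializes its
# patterns for the JSON library; "Pattern" is the miscellaneous bucket.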
view_map = {
"Agar": AgarView,
"Crawler": CrawlerView,
"Fuse": FuseView,
"Growingspaceship": GrowingSpaceshipView,
"Gun": GunView,
"InductionCoil": InductionCoilView,
"Methuselah": MethuselahView,
"MovingBreeder": MovingBreederView,
"Oscillator": OscillatorView,
"Pattern": MiscPatternView,
"Puffer": PufferView,
"Reflector": ReflectorView,
"Rotor": RotorView,
"Sawtooth": SawtoothView,
"Spaceship": SpaceshipView,
"Stilllife": StillLifeView,
"UnitCell": UnitCellView,
"Wave": WaveView,
"Wick": WickView,
"Wickstretcher": WickstretcherView
}
class Library(Serializable):
def __init__(self):
self.library = []
def add_category(self, category):
self.library.append(category)
def find_extra_rle_files(patterns_dir, rle_lookup):
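    # Collect .rle files on disk that no scraped pattern referenced, taking the
    # display name from the "#N <name>" RLE header when one is present.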
all_rle = os.listdir(patterns_dir)
extra = []
for rle in all_rle:
if not rle_lookup.get(rle):
with open("%s/%s" % (patterns_dir, rle)) as f:
content = f.read()
match = re.match("#N\s+(.*?)\r?\n", content)
name = match.group(1) if match else rle.replace(".rle", "")
extra.append({"name": name, "rle": rle})
print "Extra rle file found: %s [%s]" % (rle, name.title())
return extra
def main(pickle_filename, patterns_dir, out_file):
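    # Load the scraper's pickled categories, wrap each pattern in its view
    # class, drop references to missing .rle files, and emit the JSON library.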
    with open(pickle_filename, "rb") as f:
        categories = pickle.load(f)
lib = Library()
rle_lookup = {}
for category in categories:
patterns = []
for pattern in category.patterns:
            view_cls = view_map.get(category.name, MiscPatternView)  # fall back to the generic view for unmapped categories
view = view_cls(pattern)
if hasattr(view, "rle"):
rle_lookup[view.rle] = True
path = "%s/%s" % (patterns_dir, view.rle)
if not os.path.exists(path):
print "%s is missing in %s, skipping..." % (view.rle, patterns_dir)
delattr(view, "rle")
patterns.append(view)
lib.add_category(Category(category.name, patterns))
extra = find_extra_rle_files(patterns_dir, rle_lookup)
lib.add_category(Category("Assorted", extra))
with open(out_file, "w") as jsonfile:
jsonfile.write(lib.to_json())
if __name__ == "__main__":
main("scraper.output.p", "../patterns", "library.json")
|
mir3z/life.js
|
library-scrapper/library-builder.py
|
Python
|
mit
| 2,763
|
from django.contrib import admin
#from .models import Tag
#import sys
#import importlib
#importlib.reload(sys)
#admin.site.register(Tag)
# Register your models here.
|
summerzhangft/summer
|
tag/admin.py
|
Python
|
gpl-3.0
| 167