| code (stringlengths 3–1.05M) | repo_name (stringlengths 5–104) | path (stringlengths 4–251) | language (stringclasses 1 value) | license (stringclasses 15 values) | size (int64 3–1.05M) |
|---|---|---|---|---|---|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
import FeedMe
version = FeedMe.__version__
setup(
name='FeedMe',
version=version,
author='',
author_email='andrew@nimblemachine.com',
packages=[
'FeedMe',
],
include_package_data=True,
install_requires=[
'Django>=1.6.1',
],
zip_safe=False,
scripts=['FeedMe/manage.py'],
)
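# Usage sketch (not part of the original file): with this setup.py at the
# package root, the project is typically installed via pip, which resolves the
# Django>=1.6.1 requirement declared in install_requires:
#
#   pip install .       # regular install
#   pip install -e .    # editable/development install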
| nimblemachine/feedme | setup.py | Python | apache-2.0 | 498 |
# -*- coding: utf-8 -*-
from collections import defaultdict
import dateutil.parser
import re
from typing import Any, DefaultDict, Dict, FrozenSet, List, Optional, Set, Tuple, Union
from django.http import HttpRequest, HttpResponse, JsonResponse
from django.shortcuts import get_object_or_404
from django.db import connection
from rest_framework.decorators import api_view
from catmaid.models import UserRole, Project, Class, ClassInstance, \
ClassInstanceClassInstance, Relation, ReviewerWhitelist
from catmaid.control.authentication import requires_user_role, can_edit_or_fail
from catmaid.control.common import (get_relation_to_id_map,
get_class_to_id_map, get_request_bool, get_request_list)
def get_annotation_to_id_map(project_id:Union[int,str], annotations:List, relations=None,
classes=None) -> Dict:
"""Get a dictionary mapping annotation names to annotation IDs in a
particular project."""
if not relations:
relations = get_relation_to_id_map(project_id)
if not classes:
classes = get_class_to_id_map(project_id)
cursor = connection.cursor()
cursor.execute("""
SELECT ci.name, ci.id
FROM class_instance ci
JOIN UNNEST(%(annotations)s::text[]) query_annotation(name)
ON ci.name = query_annotation.name
WHERE project_id = %(project_id)s
AND ci.class_id = %(class_id)s
""", {
'project_id': project_id,
'class_id': classes['annotation'],
'annotations': annotations,
})
mapping = dict(cursor.fetchall())
return mapping
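# Example sketch (hypothetical names and IDs): assuming project 1 contains
# annotations named 'glia' and 'soma', the UNNEST-based lookup above yields
#
#   get_annotation_to_id_map(1, ['glia', 'soma', 'missing'])
#   # -> {'glia': 123, 'soma': 456}
#
# Names without a matching annotation class instance are simply absent from
# the result; callers decide whether to ignore or raise for unknown names.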
def get_annotated_entities(project_id:Union[int,str], params, relations=None, classes=None,
allowed_classes=['neuron', 'annotation'], sort_by=None, sort_dir=None,
range_start=None, range_length=None, with_annotations:bool=True,
with_skeletons:bool=True, with_timestamps:bool=False,
import_only:Union[None, str]=None, ignore_nonexisting:bool=False,
with_name:bool=True, with_type:bool=True) -> Tuple[List, int]:
"""Get a list of annotated entities based on the passed in search criteria.
"""
if not relations:
relations = get_relation_to_id_map(project_id)
if not classes:
classes = get_class_to_id_map(project_id)
# Get IDs of constraining classes.
allowed_class_idx = {classes[c]:c for c in allowed_classes}
allowed_class_ids = list(allowed_class_idx.keys())
# One list of annotation sets for requested annotations and one for those
# of which subannotations should be included
annotation_sets:Set[FrozenSet] = set()
not_annotation_sets:Set[FrozenSet] = set()
annotation_sets_to_expand:Set[FrozenSet] = set()
# Get name, annotator and time constraints, if available
name = params.get('name', "").strip()
name_not = get_request_bool(params, 'name_not', False)
name_exact = get_request_bool(params, 'name_exact', False)
name_case_sensitive = get_request_bool(params, 'name_case_sensitive', False)
try:
annotator_ids = set(map(int, params.getlist('annotated_by')))
except AttributeError as e:
# If <params> has no getlist() method, the passed-in object is not a
# QueryDict, but likely a regular dict. Accept this as okay.
annotator_ids = set()
start_date = params.get('annotation_date_start', "").strip()
end_date = params.get('annotation_date_end', "").strip()
# Allow parameterization of annotations using annotation names instead of IDs.
annotation_reference = params.get('annotation_reference', 'id')
if annotation_reference not in ('id', 'name'):
raise ValueError("Only 'id' and 'name' are accepted for the annotation_reference parameter")
# If annotation_names have been passed in, find matching IDs
if annotation_reference == 'name':
# Find annotation references
annotation_names:Set = set()
for key in params:
if key.startswith('annotated_with') or \
key.startswith('not_annotated_with') or \
key.startswith('sub_annotated_with'):
if len(params[key]) > 0:
annotation_names |= set(params[key].split(','))
annotation_id_map = get_annotation_to_id_map(project_id, list(annotation_names))
def to_id(inval) -> Optional[int]: # Type checkers want both conditional definitions of to_id() to share one signature, including parameter names
id = annotation_id_map.get(inval)
if not id:
if ignore_nonexisting:
return None
raise ValueError("Unknown annotation: " + inval)
return id
else:
def to_id(inval) -> Optional[int]:
return int(inval)
# Collect annotations and sub-annotation information. Each entry can be a
# list of IDs, which will be treated as an OR combination.
for key in params:
if key.startswith('annotated_with'):
if len(params[key]) > 0:
annotation_set = frozenset(a_id for a_id in map(to_id, params[key].split(',')) if a_id is not None)
annotation_sets.add(annotation_set)
elif key.startswith('not_annotated_with'):
if len(params[key]) > 0:
not_annotation_set = frozenset(a_id for a_id in map(to_id, params[key].split(',')) if a_id is not None)
not_annotation_sets.add(not_annotation_set)
elif key.startswith('sub_annotated_with'):
if len(params[key]) > 0:
annotation_set = frozenset(a_id for a_id in map(to_id, params[key].split(',')) if a_id is not None)
annotation_sets_to_expand.add(annotation_set)
filters = [
'ci.project_id = %(project_id)s',
'ci.class_id = ANY (%(class_ids)s)'
]
params = {
"project_id": project_id,
"class_ids": allowed_class_ids,
"annotated_with": relations['annotated_with'],
"model_of": relations['model_of']
}
if len(annotator_ids) > 0:
params['annotator_ids'] = list(annotator_ids)
if start_date:
params['start_date'] = start_date
if end_date:
params['end_date'] = end_date
# If a name is given, add this to the query. If its first character is a
# slash, treat it as a regex. There is a trigram index and an upper()
# expression index on class_instance.name, and we add checks to utilize both
# if possible. This is mainly useful for exact name queries, both
# case-sensitive and case-insensitive, as well as regex and normal.
if name:
is_regex = name.startswith('/')
if is_regex:
op = '~' if name_case_sensitive else '~*'
upper_name_op = '~'
params["name"] = name[1:]
else:
op = '~~' if name_case_sensitive else '~~*'
upper_name_op = '~~'
# LIKE (~~) and ILIKE (~~*) treat _ and % as wildcards, therefore
# they need to be escaped in the input.
name = name.replace('_', '\\_').replace('%', '\\%')
params["name"] = name if name_exact else ('%' + name + '%')
if name_not:
filters.append(f"ci.name !{op} %(name)s")
filters.append(f"upper(ci.name) !{upper_name_op} upper(%(name)s)")
else:
filters.append(f"ci.name {op} %(name)s")
filters.append(f"upper(ci.name) {upper_name_op} upper(%(name)s)")
# Map annotation sets to their expanded sub-annotations
sub_annotation_ids = get_sub_annotation_ids(project_id, annotation_sets_to_expand,
relations, classes)
# Collect all annotations and their sub-annotation IDs (if requested) in a
# set each. In the actual query, the sets are combined with AND, while
# everything within one set is combined with OR.
annotation_id_sets = []
for annotation_set in annotation_sets:
current_annotation_ids = set(annotation_set)
# Add sub annotations, if requested
sa_ids = sub_annotation_ids.get(annotation_set)
if sa_ids and len(sa_ids):
current_annotation_ids.update(sa_ids)
annotation_id_sets.append(current_annotation_ids)
not_annotation_id_sets = []
for not_annotation_set in not_annotation_sets:
current_not_annotation_ids = set(not_annotation_set)
# Add sub annotations, if requested
sa_ids = sub_annotation_ids.get(not_annotation_set)
if sa_ids and len(sa_ids):
current_not_annotation_ids.update(sa_ids)
not_annotation_id_sets.append(current_not_annotation_ids)
# Build needed joins for annotated_with search criteria
joins = []
fields = ['ci.id', 'ci.user_id', 'ci.creation_time', 'ci.edition_time',
'ci.project_id', 'ci.class_id', 'ci.name', 'skel_link.skeletons']
creation_timestamp_fields = []
edition_timestamp_fields = []
for n, annotation_id_set in enumerate(annotation_id_sets):
joins.append(f"""
INNER JOIN class_instance_class_instance cici{n}
ON ci.id = cici{n}.class_instance_a
""")
filters.append(f"""
cici{n}.relation_id = %(annotated_with)s AND
cici{n}.class_instance_b = ANY (%(cici{n}_ann)s)
""")
if with_timestamps:
c_field = f'cici{n}.creation_time'
e_field = f'cici{n}.edition_time'
fields.append(c_field)
fields.append(e_field)
creation_timestamp_fields.append(c_field)
edition_timestamp_fields.append(e_field)
params[f'cici{n}_ann'] = list(annotation_id_set)
# Add annotator and time constraints, if available
if annotator_ids:
filters.append(f"""
cici{n}.user_id = ANY (%(annotator_ids)s)
""")
if start_date:
filters.append(f"""
cici{n}.creation_time >= %(start_date)s
""")
if end_date:
filters.append(f"""
cici{n}.creation_time <= %(end_date)s
""")
# To exclude class instances that are linked to a particular annotation, all
# annotations of each candidate are collected and, if this list contains an
# exclusion annotation, the candidate is removed.
if not_annotation_sets:
joins.append("""
LEFT JOIN LATERAL (
SELECT cici_a.class_instance_a AS id,
array_agg(cici_a.class_instance_b) AS annotations
FROM class_instance_class_instance cici_a
WHERE cici_a.class_instance_a = ci.id
AND cici_a.relation_id = %(annotated_with)s
GROUP BY 1
) ann_link ON ci.id = ann_link.id
""")
for n, anno_id_set in enumerate(not_annotation_sets):
filters.append(f"""
NOT (ann_link.annotations && %(cici_ex{n}_ann)s::bigint[])
""")
params[f'cici_ex{n}_ann'] = list(anno_id_set)
# The basic query
query = """
SELECT {fields}
FROM class_instance ci
{joins}
WHERE {where}
{sort}
{offset}
"""
cursor = connection.cursor()
# If there are range limits and given that it is likely that there are many
# entities returned, it is more efficient to get the total result number
# with two queries: 1. Get total number of neurons 2. Get limited set. The
# (too expensive) alternative would be to get all neurons for counting and
# limiting on the Python side.
num_total_records = None
offset = ""
if range_start is not None and range_length is not None:
# Get total number of results with separate query. No sorting or offset
# is needed for this.
query_fmt_params = {
'fields': 'COUNT(*)',
'joins': '\n'.join(joins),
'where': ' AND '.join(filters),
'sort': '',
'offset': ''
}
cursor.execute(query.format(**query_fmt_params), params)
num_total_records = cursor.fetchone()[0]
offset = "OFFSET %(range_start)s LIMIT %(range_length)s"
params['range_start'] = int(range_start)
params['range_length'] = int(range_length)
# Add skeleton ID info (if available)
joins.append("""
LEFT JOIN LATERAL (
SELECT cici_n.class_instance_b AS id,
array_agg(cici_n.class_instance_a) AS skeletons
FROM class_instance_class_instance cici_n
WHERE cici_n.class_instance_b = ci.id
AND cici_n.relation_id = %(model_of)s
GROUP BY 1
) skel_link ON ci.id = skel_link.id
""")
# Check if some nodes originate from an import transaction, if only a
# partial match is needed. This is done separately to use a more optimized
# query.
if import_only == 'partial' or import_only == 'full':
joins.append("""
JOIN catmaid_skeleton_summary css
ON css.skeleton_id = ANY(skel_link.skeletons)
""")
if import_only == 'partial':
filters.append('css.num_imported_nodes > 0')
else:
filters.append('css.num_imported_nodes > 0')
filters.append('css.num_imported_nodes = css.num_nodes')
elif import_only is None:
pass
else:
raise ValueError(f'Unknown import constraint mode: {import_only}')
query_fmt_params = {
"joins": "\n".join(joins),
"where": " AND ".join(filters),
"sort": "",
"offset": offset,
"fields": ', '.join(fields),
}
# Sort if requested
if sort_dir and sort_by:
regular_sort_orders = ('id', 'name', 'first_name', 'last_name')
timebased_sort_orders = ('annotated_on', 'last_annotation_link_edit')
if sort_by not in regular_sort_orders and sort_by not in timebased_sort_orders:
raise ValueError(f'Unknown sort field: {sort_by}')
if sort_by in timebased_sort_orders and not with_timestamps:
raise ValueError('Sorting by time requires the <with_timestamps> parameter to be true')
if sort_by == 'annotated_on':
sort_by = ', '.join(creation_timestamp_fields)
elif sort_by == 'last_annotation_link_edit':
sort_by = ', '.join(edition_timestamp_fields)
query_fmt_params['sort'] = f"ORDER BY {sort_by} {sort_dir.upper()}"
# Execute query and build result data structure
cursor.execute(query.format(**query_fmt_params), params)
entities = []
seen_ids:Set = set()
for ent in cursor.fetchall():
# Don't export objects with same ID multiple times
if ent[0] in seen_ids:
continue
class_name = allowed_class_idx[ent[5]]
entity_info = {
'id': ent[0],
}
if with_name:
entity_info['name'] = ent[6]
if with_type:
entity_info['type'] = class_name
if with_timestamps:
entity_info['creation_time'] = ent[2]
entity_info['edition_time'] = ent[3]
# Depending on the type of entity, some extra information is added.
if class_name == 'neuron':
entity_info['skeleton_ids'] = ent[7]
entities.append(entity_info)
seen_ids.add(ent[0])
if num_total_records is None:
num_total_records = len(entities)
if with_annotations:
entity_ids = [e['id'] for e in entities]
# Make second query to retrieve annotations and skeletons
annotation_fields = ['class_instance_a', 'class_instance_b',
'class_instance_b__name', 'user_id']
if with_timestamps:
annotation_fields.append('creation_time')
annotation_fields.append('edition_time')
annotations = ClassInstanceClassInstance.objects.filter(
relation_id = relations['annotated_with'],
class_instance_a__id__in = entity_ids).order_by('id').values_list(
*annotation_fields)
annotation_dict:DefaultDict[Any, List] = defaultdict(list)
for a in annotations:
ann_data = {'id': a[1], 'name': a[2], 'uid': a[3]}
if with_timestamps:
ann_data['creation_time'] = a[4]
ann_data['edition_time'] = a[5]
annotation_dict[a[0]].append(ann_data)
for ent in entities:
ent['annotations'] = annotation_dict.get(ent['id'], [])
return entities, num_total_records
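# Usage sketch (hypothetical IDs): the function also accepts a plain dict of
# request parameters, e.g. all entities annotated with annotation 123, first
# page of 50 results:
#
#   entities, total = get_annotated_entities(1, {'annotated_with': '123'},
#       range_start=0, range_length=50)
#
# Each entry in <entities> carries at least 'id'; 'name', 'type',
# 'skeleton_ids' and 'annotations' are included depending on the with_* flags.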
def get_sub_annotation_ids(project_id:Union[int,str], annotation_sets, relations, classes) -> Dict:
""" Sub-annotations are annotations that are annotated with an annotation
from the annotation_set passed. Additionally, transivitely annotated
annotations are returned as well. Note that all entries annotation_sets
must be frozenset instances, they need to be hashable.
"""
if not annotation_sets:
return {}
aaa_tuples = ClassInstanceClassInstance.objects.filter(
project_id=project_id,
class_instance_a__class_column=classes['annotation'],
class_instance_b__class_column=classes['annotation'],
relation_id = relations['annotated_with']).values_list(
'class_instance_b', 'class_instance_a')
# A set wrapper to keep a set in a dictionary
class set_wrapper:
def __init__(self):
self.data:Set = set()
# Create a dictionary of all annotations annotating a set of annotations
aaa:Dict = {}
for aa in aaa_tuples:
sa_set = aaa.get(aa[0])
if sa_set is None:
sa_set = set_wrapper()
aaa[aa[0]] = sa_set
sa_set.data.add(aa[1])
# Collect all sub-annotations by following the annotation hierarchy for
# every annotation in the annotation set passed.
sa_ids:Dict = {}
for annotation_set in annotation_sets:
# Start with an empty result set for each requested annotation set
ls:Set = set()
for a in annotation_set:
working_set = set([a])
while working_set:
parent_id = working_set.pop()
# Try to get the sub-annotations for this parent
child_ids = aaa.get(parent_id) or set_wrapper()
for child_id in child_ids.data:
if child_id not in sa_ids:
if child_id not in ls:
# Add all children as sub annotations
ls.add(child_id)
working_set.add(child_id)
# Store the result list for this ID
sa_ids[annotation_set] = list(ls)
return sa_ids
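# Example sketch (hypothetical IDs): given a hierarchy in which annotation 2 is
# annotated with 1, and 3 is annotated with 2, expansion is transitive:
#
#   get_sub_annotation_ids(1, {frozenset([1])}, relations, classes)
#   # -> {frozenset([1]): [2, 3]}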
@api_view(['POST'])
@requires_user_role([UserRole.Browse])
def query_annotated_classinstances(request:HttpRequest, project_id:Optional[Union[int,str]] = None) -> JsonResponse:
"""Query entities based on various constraints
Entities are objects that can be referenced within CATMAID's semantic
space, e.g. neurons, annotations or stack groups. This API allows to query
them, mainly by annotations that have been used with them. Multiple
annotation parameters can be used to combine different annotation sets with
AND. Elements of one annotation parameter are combined with OR.
---
parameters:
- name: name
description: The name (or a part of it) of result elements.
type: string
paramType: form
- name: name_exact
description: |
Whether the name has to match exactly or can be a part of the result
name. This is typically faster than using a regular expression.
False by default.
type: bool
paramType: form
required: false
defaultValue: false
- name: name_case_sensitive
description: |
Whether the name has to match the exact letter case provided. False
by default.
type: bool
paramType: form
required: false
defaultValue: false
- name: annotated_by
description: A result element was annotated by a user with this ID.
type: integer
paramType: form
allowMultiple: true
- name: annotation_date_start
description: The earliest YYYY-MM-DD date result elements have been annotated at.
format: date
type: string
paramType: query
- name: annotation_date_end
description: The latest YYYY-MM-DD date result elements have been annotated at.
format: date
type: string
paramType: query
- name: annotated_with
description: |
A comma separated list of annotation IDs which all annotate the
result elements.
type: integer
paramType: form
allowMultiple: true
- name: not_annotated_with
description: |
A comma separated list of annotation IDs which don't annotate the
result elements.
type: integer
paramType: form
allowMultiple: true
- name: sub_annotated_with
description: |
A comma separated list of annotation IDs that are contained
in either 'annotated_with' or 'not_annotated_with' that get expanded to
also include their sub-annotations in the query (of which then at
least one has to match inclusion or exclusion respectively).
type: integer
paramType: form
allowMultiple: true
- name: with_annotations
description: Indicate if annotations of result elements should be returned.
type: boolean
paramType: form
- name: types
description: |
Allowed result types. Multiple types can be passed with multiple
parameters. Defaults to 'neuron' and 'annotation'.
type: string
paramType: form
allowMultiple: true
- name: sort_by
description: Indicates how results are sorted.
type: string
defaultValue: id
enum: [id, name, first_name, last_name, 'annotated_on', 'last_annotation_link_edit']
paramType: form
- name: sort_dir
description: Indicates sorting direction.
type: string
defaultValue: asc
enum: [asc, desc]
paramType: form
- name: range_start
description: The first result element index.
type: integer
paramType: form
- name: range_length
description: The number of results
type: integer
paramType: form
- name: annotation_reference
description: Whether annotation references are IDs or names; can be 'id' or 'name'.
type: string
enum: [id, name]
defaultValue: id
required: false
paramType: form
- name: with_timestamps
description: Whether to return also the annotation time for each entity.
type: boolean
required: false
defaultValue: false
paramType: form
- name: import_only
description: |
Whether and how only skeletons that contain imported fragments
should be returned. If set to 'partial', only skeletons that have at
least one imported node in them are returned. If set to 'full', only
skeletons that are fully imported are returned. Not set by default.
type: string
required: false
paramType: form
- name: ignore_nonexisting
description: |
Whether non-existing query annotations should be ignored instead of
raising an error.
type: boolean
required: false
defaultValue: false
paramType: form
- name: with_name
description: Whether to return the name of each entity.
type: boolean
required: false
defaultValue: true
paramType: form
- name: with_type
description: Whether to return the type of each entity.
type: boolean
required: false
defaultValue: true
paramType: form
models:
annotated_entity:
id: annotated_entity
description: A result entity.
properties:
name:
type: string
description: The name of the entity
required: true
id:
type: integer
description: The id of the entity
required: true
skeleton_ids:
type: array
description: A list of ids of skeletons modeling this entity
required: true
items:
type: integer
type:
type: string
description: Type of the entity
required: true
type:
entities:
type: array
items:
$ref: annotated_entity
required: true
totalRecords:
type: integer
required: true
"""
p = get_object_or_404(Project, pk = project_id)
classes = dict(Class.objects.filter(project_id=project_id).values_list('class_name', 'id'))
relations = dict(Relation.objects.filter(project_id=project_id).values_list('relation_name', 'id'))
# Type constraints
allowed_classes = get_request_list(request.POST, 'types', ['neuron', 'annotation'])
sort_by = request.POST.get('sort_by', 'id')
if sort_by not in ('id', 'name', 'first_name', 'last_name', 'annotated_on',
'last_annotation_link_edit'):
raise ValueError("Only 'id', 'name', 'first_name' and 'last_name' "
"are allowed for the 'sort-dir' parameter")
sort_dir = request.POST.get('sort_dir', 'asc')
if sort_dir not in ('asc', 'desc'):
raise ValueError("Only 'asc' and 'desc' are allowed for the 'sort-dir' parameter")
range_start = request.POST.get('range_start', None)
range_length = request.POST.get('range_length', None)
with_annotations = get_request_bool(request.POST, 'with_annotations', False)
with_timestamps = get_request_bool(request.POST, 'with_timestamps', False)
with_name = get_request_bool(request.POST, 'with_name', True)
with_type = get_request_bool(request.POST, 'with_type', True)
import_only = request.POST.get('import_only', None)
ignore_nonexisting = get_request_bool(request.POST, 'ignore_nonexisting', False)
entities, num_total_records = get_annotated_entities(p.id, request.POST,
relations, classes, allowed_classes, sort_by, sort_dir, range_start,
range_length, with_annotations, with_timestamps=with_timestamps,
import_only=import_only, ignore_nonexisting=ignore_nonexisting,
with_name=with_name, with_type=with_type)
return JsonResponse({
'entities': entities,
'totalRecords': num_total_records,
})
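# Usage sketch (hypothetical URL and IDs): the endpoint is queried via POST,
# e.g. with the Django test client:
#
#   response = client.post('/1/annotations/query-targets', {
#       'annotated_with[0]': '123,124',  # OR within one parameter
#       'annotated_with[1]': '125',      # AND between parameters
#       'types[0]': 'neuron',
#   })
#
# The JSON response contains the 'entities' and 'totalRecords' fields.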
def _update_neuron_annotations(project_id:Union[int,str], neuron_id,
annotation_map:Dict[str,Any], losing_neuron_id=None) -> None:
""" Ensure that the neuron is annotated_with only the annotations given.
These annotations are expected to come as dictionary of annotation name
versus annotator ID.
If losing_neuron_id is provided, annotations missing on the neuron that
exist for the losing neuron will be updated to refer to neuron_id, rather
than created from scratch. This preserves provenance such as creation times.
"""
annotated_with = Relation.objects.get(project_id=project_id,
relation_name='annotated_with')
qs = ClassInstanceClassInstance.objects.filter(
class_instance_a__id=neuron_id, relation=annotated_with)
qs = qs.select_related('class_instance_b').values_list(
'class_instance_b__name', 'class_instance_b__id', 'id')
existing_annotations = {e[0]: {
'annotation_id': e[1],
'cici_id': e[2]
} for e in qs}
update = set(annotation_map.keys())
existing = set(existing_annotations.keys())
missing = update - existing
if losing_neuron_id:
qs = ClassInstanceClassInstance.objects.filter(
class_instance_a__id=losing_neuron_id, relation=annotated_with)
qs = qs.select_related('class_instance_b').values_list(
'class_instance_b__name', 'id')
losing_existing_annotations = dict(qs)
losing_missing = frozenset(losing_existing_annotations.keys()) & missing
if losing_missing:
cici_ids = [losing_existing_annotations[k] for k in losing_missing]
u_ids = [annotation_map[k]['user_id'] for k in losing_missing]
cursor = connection.cursor()
cursor.execute('''
UPDATE class_instance_class_instance
SET class_instance_a = %s, user_id = missing.u_id
FROM UNNEST(%s::bigint[], %s::integer[]) AS missing(cici_id, u_id)
WHERE id = missing.cici_id;
''', (neuron_id, cici_ids, u_ids))
missing = missing - losing_missing
missing_map = {k:v for k,v in annotation_map.items() if k in missing}
_annotate_entities(project_id, [neuron_id], missing_map)
to_delete = existing - update
to_delete_ids = tuple(link['annotation_id'] for name, link in existing_annotations.items() \
if name in to_delete)
ClassInstanceClassInstance.objects.filter(project=project_id,
class_instance_a_id=neuron_id, relation=annotated_with,
class_instance_b_id__in=to_delete_ids).delete()
for aid in to_delete_ids:
delete_annotation_if_unused(project_id, aid, annotated_with)
to_update = update.intersection(existing)
to_update_ids = list(map(lambda x: existing_annotations[x]['cici_id'], to_update))
to_update_et = list(map(lambda x: annotation_map[x]['edition_time'], to_update))
to_update_ct = list(map(lambda x: annotation_map[x]['creation_time'], to_update))
cursor = connection.cursor()
cursor.execute("""
UPDATE class_instance_class_instance
SET creation_time = to_update.creation_time
FROM UNNEST(%s::bigint[], %s::timestamptz[])
AS to_update(cici_id, creation_time)
WHERE id = to_update.cici_id;
UPDATE class_instance_class_instance
SET edition_time = to_update.edition_time
FROM UNNEST(%s::bigint[], %s::timestamptz[])
AS to_update(cici_id, edition_time)
WHERE id = to_update.cici_id;
""", (to_update_ids,
to_update_ct,
to_update_ids,
to_update_et))
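# Usage sketch (hypothetical IDs and timestamps t0/t1): <annotation_map> maps
# annotation names to metadata dicts, e.g.
#
#   _update_neuron_annotations(1, 42, {
#       'glia': {'user_id': 5, 'creation_time': t0, 'edition_time': t1},
#   })
#
# Note that entries for annotations already linked to the neuron must carry
# 'creation_time' and 'edition_time', since the UPDATE above reads both keys
# unconditionally.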
def delete_annotation_if_unused(project, annotation, relation) -> Tuple[bool, int]:
""" Delete the given annotation instance if it is not used anymore.
Returns a tuple where the first element states if the annotation was
deleted and the second element is the number of remaining annotation links.
"""
num_annotation_links = ClassInstanceClassInstance.objects.filter(
project=project, class_instance_b=annotation, relation=relation).count()
if num_annotation_links:
return False, num_annotation_links
else:
# See if the annotation is annotated itself
meta_annotation_links = ClassInstanceClassInstance.objects.filter(
project=project, class_instance_a=annotation, relation=relation)
meta_annotation_ids = [cici.class_instance_b_id for cici in meta_annotation_links]
# Delete annotation
ClassInstance.objects.filter(project=project, id=annotation).delete()
# Delete also meta annotation instances, if they exist
for ma in meta_annotation_ids:
delete_annotation_if_unused(project, ma, relation)
return True, 0
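# Example sketch: the return value distinguishes the two outcomes, e.g.
#
#   deleted, num_left = delete_annotation_if_unused(p, annotation_id, annotated_with)
#   # (False, 3) -> still used by 3 links, nothing deleted
#   # (True, 0)  -> annotation (and any now-unused meta annotations) deleted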
def _annotate_entities(project_id:Union[int,str], entity_ids, annotation_map:Dict[str,Any],
update_existing=False) -> Tuple[Dict,Set,Set]:
""" Annotate the entities with the given <entity_ids> with the given
annotations. These annotations are expected to come as dictionary of
annotation name versus an object with at least the field 'user_id'
annotator ID. If the 'creation_time' and/or 'edition_time' fields are
available, they will be used for the respective columns. A list of all
annotation class instances that have been used is returned. Annotation
names can contain the counting pattern {nX} with X being a number. This
will add an incrementing number starting from X for each entity.
"""
new_annotations = set()
existing_annotations = set()
r = Relation.objects.get(project_id = project_id,
relation_name = 'annotated_with')
annotation_class = Class.objects.get(project_id = project_id,
class_name = 'annotation')
annotation_objects = {}
# Create a regular expression to find allowed patterns. The first group is
# the whole {nX} part, while the second group is X only.
counting_pattern = re.compile(r"(\{n(\d+)\})")
for annotation, meta in annotation_map.items():
# Look for patterns, replace all {n} with {n1} to normalize
annotation = annotation.replace("{n}", "{n1}")
# Find all {nX} in the annotation name
expanded_annotations = {}
if counting_pattern.search(annotation):
# Create annotation names based on the counting patterns found, for
# each entity.
for i, eid in enumerate(entity_ids):
a = annotation
while True:
# Find next match and cancel if there isn't any
m = counting_pattern.search(a)
if not m:
break
# Replace match
count = int(m.groups()[1]) + i
a = m.string[:m.start()] + str(count) + m.string[m.end():]
# Remember this annotation for the current entity
expanded_annotations[a] = [eid]
else:
# No matches, so use same annotation for all entities
expanded_annotations = {annotation: entity_ids}
# Make sure the annotation's class instance exists.
for a, a_entity_ids in expanded_annotations.items():
ci, created = ClassInstance.objects.get_or_create(
project_id=project_id, name=a,
class_column=annotation_class,
defaults={'user_id': meta['user_id']})
if created:
new_annotations.add(ci.id)
newly_annotated = set()
# Annotate each of the entities. Don't allow duplicates.
for entity_id in a_entity_ids:
new_cici_defaults = {
'class_instance_a_id': entity_id,
'user_id': meta['user_id']
}
for field in ('creation_time', 'edition_time'):
value = meta.get(field)
if value:
new_cici_defaults[field] = value
cici, created = ClassInstanceClassInstance.objects.get_or_create(
project_id=project_id, relation=r,
class_instance_a__id=entity_id, class_instance_b=ci,
defaults=new_cici_defaults)
if created:
newly_annotated.add(entity_id)
else:
existing_annotations.add(ci.id)
if update_existing:
# Update creation time and edition time, if requested. Model
# instances have no update() method, so set fields and save.
for field, value in new_cici_defaults.items():
setattr(cici, field, value)
cici.save()
# Remember which entities got newly annotated
annotation_objects[ci] = newly_annotated
return annotation_objects, new_annotations, existing_annotations
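# Example sketch of the {nX} counting pattern handled above: annotating three
# entities with the name 'branch {n5}' expands, per entity, to 'branch 5',
# 'branch 6' and 'branch 7'. A bare '{n}' is first normalized to '{n1}':
#
#   counting_pattern = re.compile(r"(\{n(\d+)\})")
#   m = counting_pattern.search("branch {n5}")
#   m.groups()  # -> ('{n5}', '5')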
def _annotate_entities_with_name(project_id:Union[int,str], user_id, entity_ids) -> Tuple[List[List[Any]], List[List[Any]]]:
cursor = connection.cursor()
annotated_with = Relation.objects.get(project_id=project_id,
relation_name='annotated_with')
annotation_class = Class.objects.get(project_id=project_id,
class_name='annotation')
name_annotation, _ = ClassInstance.objects.get_or_create(project_id=project_id,
class_column=annotation_class, name='Name', defaults={
'user_id': user_id,
})
entity_name_map = dict(ClassInstance.objects.filter(
pk__in=entity_ids).values_list('id', 'name'))
entity_names = set(entity_name_map.values())
existing_name_annotations = dict(ClassInstance.objects.filter(
project_id=project_id, class_column=annotation_class,
name__in=entity_names).values_list('name', 'id'))
missing_name_annotations = entity_names - set(existing_name_annotations.keys())
if missing_name_annotations:
# Escape single quotes by doubling them
escaped_name_annotations = (n.replace("'", "''") for n in missing_name_annotations)
values = (f"({user_id}, {project_id}, {annotation_class.id}, '{x}')" for x in escaped_name_annotations)
values_str = ','.join(values) or '()'
cursor.execute(f"""
INSERT INTO class_instance (user_id, project_id, class_id, name)
VALUES {values_str}
RETURNING name, id;
""")
added_annotations = dict(cursor.fetchall())
existing_name_annotations.update(added_annotations)
# Now with all name annotations available we need to make sure all of them
# have the meta annotation 'Name'.
cursor.execute("""
INSERT INTO class_instance_class_instance (project_id, user_id,
class_instance_a, class_instance_b, relation_id)
SELECT %(project_id)s, %(user_id)s, ci.id, %(name_ann_id)s, %(rel_id)s
FROM class_instance ci
JOIN UNNEST(%(name_ann_names)s::text[]) q(name)
ON q.name = ci.name
LEFT JOIN class_instance_class_instance cici
ON cici.class_instance_a = ci.id
AND cici.class_instance_b = %(name_ann_id)s
AND cici.relation_id = %(rel_id)s
WHERE cici.id IS NULL
AND ci.project_id = %(project_id)s
AND ci.class_id = %(annotation_class_id)s
RETURNING id
""", {
'project_id': project_id,
'user_id': user_id,
'name_ann_id': name_annotation.id,
'rel_id': annotated_with.id,
'annotation_class_id': annotation_class.id,
'name_ann_names': list(existing_name_annotations.keys()),
})
created_name_links = cursor.fetchall()
# Now we have valid name annotations for each target entity. The final step
# is to link those name annotations to the entities.
cursor.execute("""
INSERT INTO class_instance_class_instance (project_id, user_id,
class_instance_a, class_instance_b, relation_id)
SELECT %(project_id)s, %(user_id)s, ci.id, ci_name.id, %(rel_id)s
FROM class_instance ci
JOIN UNNEST(%(entity_ids)s::bigint[]) q(id)
ON q.id = ci.id
JOIN class_instance ci_name
ON ci_name.name = ci.name
LEFT JOIN class_instance_class_instance cici
ON cici.class_instance_a = ci.id
AND cici.class_instance_b = ci_name.id
AND cici.relation_id = %(rel_id)s
WHERE cici.id IS NULL
AND ci.project_id = %(project_id)s
AND ci_name.project_id = %(project_id)s
AND ci_name.class_id = %(annotation_class_id)s
RETURNING class_instance_a
""", {
'project_id': project_id,
'user_id': user_id,
'name_ann_id': name_annotation.id,
'rel_id': annotated_with.id,
'entity_ids': entity_ids,
'annotation_class_id': annotation_class.id,
})
updated_cis = cursor.fetchall()
return updated_cis, created_name_links
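# Usage sketch (hypothetical IDs): for neurons 10 and 11 named 'n10' and 'n11',
# the statements above (a) create missing annotations 'n10'/'n11', (b)
# meta-annotate them with 'Name', and (c) link each neuron to its own name
# annotation:
#
#   updated_cis, created_links = _annotate_entities_with_name(1, user.id, [10, 11])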
@requires_user_role(UserRole.Annotate)
def annotate_entities(request:HttpRequest, project_id = None) -> JsonResponse:
p = get_object_or_404(Project, pk = project_id)
annotations = get_request_list(request.POST, 'annotations', [])
meta_annotations = get_request_list(request.POST, 'meta_annotations', [])
entity_ids = get_request_list(request.POST, 'entity_ids', [], map_fn=int)
skeleton_ids = get_request_list(request.POST, 'skeleton_ids', [], map_fn=int)
if any(skeleton_ids):
skid_to_eid = dict(ClassInstance.objects.filter(project = p,
class_column__class_name = 'neuron',
cici_via_b__relation__relation_name = 'model_of',
cici_via_b__class_instance_a__in = skeleton_ids).values_list(
'cici_via_b__class_instance_a', 'id'))
entity_ids += [skid_to_eid[skid] for skid in skeleton_ids]
# Annotate entities
annotation_map = {a: { 'user_id': request.user.id } for a in annotations}
annotation_objs, new_annotations, existing_annotations = _annotate_entities(
project_id, entity_ids, annotation_map)
# Annotate annotations
if meta_annotations:
annotation_ids = [a.id for a in annotation_objs.keys()]
meta_annotation_map = {ma: { 'user_id': request.user.id } for ma in meta_annotations}
meta_annotation_objs, new_meta_annotations, existing_meta_annotations = \
_annotate_entities(project_id, annotation_ids, meta_annotation_map)
# Keep track of new annotations
new_annotations.update(new_meta_annotations)
# Update used annotation objects set
for ma, me in meta_annotation_objs.items():
entities = annotation_objs.get(ma)
if entities:
entities.update(me)
else:
annotation_objs[ma] = me
result = {
'message': 'success',
'annotations': [{
'name': a.name,
'id': a.id,
'entities': list(e)
} for a,e in annotation_objs.items()],
'new_annotations': list(new_annotations),
'existing_annotations': list(existing_annotations),
}
return JsonResponse(result)
@api_view(['POST'])
@requires_user_role(UserRole.Annotate)
def add_neuron_name_annotations(request:HttpRequest, project_id = None) -> JsonResponse:
"""Add missing neuron name annotations.
To each passed-in neuron, given as a list of neuron IDs and/or skeleton IDs, the
neuron name stored in the neuron's base name is added as annotation. Each
neuron name annotation is meta-annotated with a "Name" annotation.
---
parameters:
skeleton_ids:
type: array
description: A list of skeleton IDs to update
required: false
items:
type: integer
entity_ids:
type: array
description: A list of target entity IDs to update
required: false
items:
type: integer
"""
p = get_object_or_404(Project, pk = project_id)
entity_ids = get_request_list(request.POST, 'entity_ids', [], map_fn=int)
skeleton_ids = get_request_list(request.POST, 'skeleton_ids', [], map_fn=int)
if not any(entity_ids):
if not any(skeleton_ids):
raise ValueError("Need either 'skeleton_ids' or 'entity_ids'")
entity_ids = []
if any(skeleton_ids):
skid_to_eid = dict(ClassInstance.objects.filter(project = p,
class_column__class_name = 'neuron',
cici_via_b__relation__relation_name = 'model_of',
cici_via_b__class_instance_a__in = skeleton_ids).values_list(
'cici_via_b__class_instance_a', 'id'))
entity_ids += [skid_to_eid[skid] for skid in skeleton_ids]
updated_cis, created_name_links = _annotate_entities_with_name(
project_id, request.user.id, entity_ids)
result = {
'message': 'success',
'updated_cis': updated_cis,
'created_meta_links': len(created_name_links),
}
return JsonResponse(result)
@requires_user_role(UserRole.Annotate)
def remove_annotations(request:HttpRequest, project_id=None) -> JsonResponse:
""" Removes an annotation from one or more entities.
"""
annotation_ids = get_request_list(request.POST, 'annotation_ids', [], map_fn=int)
entity_ids = get_request_list(request.POST, 'entity_ids', [], map_fn=int)
if not annotation_ids:
raise ValueError("No annotation IDs provided")
if not entity_ids:
raise ValueError("No entity IDs provided")
# Remove individual annotations
deleted_annotations = {}
deleted_links = []
num_left_annotations = {}
for annotation_id in annotation_ids:
cicis_to_delete, missed_cicis, deleted, num_left = _remove_annotation(
request.user, project_id, entity_ids, annotation_id)
# Keep track of results
num_left_annotations[str(annotation_id)] = num_left
targetIds = []
for cici in cicis_to_delete:
deleted_links.append(cici.id)
# The target is class_instance_a, because we deal with the
# "annotated_with" relation.
targetIds.append(cici.class_instance_a_id)
if targetIds:
deleted_annotations[annotation_id] = {
'targetIds': targetIds
}
return JsonResponse({
'deleted_annotations': deleted_annotations,
'deleted_links': deleted_links,
'left_uses': num_left_annotations
})
@requires_user_role(UserRole.Annotate)
def remove_annotation(request:HttpRequest, project_id=None, annotation_id=None) -> JsonResponse:
""" Removes an annotation from one or more entities.
"""
entity_ids = get_request_list(request.POST, 'entity_ids', [], map_fn=int)
cicis_to_delete, missed_cicis, deleted, num_left = _remove_annotation(
request.user, project_id, entity_ids, annotation_id)
if len(cicis_to_delete) > 1:
message = "Removed annotation from %s entities." % len(cicis_to_delete)
elif len(cicis_to_delete) == 1:
message = "Removed annotation from one entity."
else:
message = "No annotation removed."
if missed_cicis:
message += " Couldn't de-annotate %s entities, due to the lack of " \
"permissions." % len(missed_cicis)
if deleted:
message += " Also removed annotation instance, because it isn't used " \
"anywhere else."
else:
message += " There are %s links left to this annotation." % num_left
return JsonResponse({
'message': message,
'deleted_annotation': deleted,
'left_uses': num_left
})
def _remove_annotation(user, project_id:Union[int,str], entity_ids, annotation_id) -> Tuple[List, List, int, int]:
"""Remove an annotation made by a certain user in a given project on a set
of entities (usually neurons and annotations). Returned is a 4-tuple which
holds the deleted annotation links, the list of links that couldn't be
deleted due to lack of permission, if the annotation itself was removed
(because it wasn't used anymore) and how many uses of this annotation are
left.
"""
p = get_object_or_404(Project, pk=project_id)
relations = dict(Relation.objects.filter(
project_id=project_id).values_list('relation_name', 'id'))
# Get CICI instance representing the link
cici_n_a = ClassInstanceClassInstance.objects.filter(project=p,
relation_id=relations['annotated_with'],
class_instance_a__id__in=entity_ids,
class_instance_b__id=annotation_id)
# Make sure the current user has permissions to remove the annotation.
missed_cicis = []
cicis_to_delete = []
for cici in cici_n_a:
try:
can_edit_or_fail(user, cici.id, 'class_instance_class_instance')
cicis_to_delete.append(cici)
except Exception:
# Remember links for which permissions are missing
missed_cicis.append(cici)
# Remove link between entity and annotation for all links on which the user
# the necessary permissions has.
if cicis_to_delete:
ClassInstanceClassInstance.objects \
.filter(id__in=[cici.id for cici in cicis_to_delete]) \
.delete()
# Remove the annotation class instance, regardless of the owner, if there
# are no more links to it
annotated_with = Relation.objects.get(project_id=project_id,
relation_name='annotated_with')
deleted, num_left = delete_annotation_if_unused(project_id, annotation_id,
annotated_with)
return cicis_to_delete, missed_cicis, deleted, num_left
@api_view(['POST'])
@requires_user_role(UserRole.Annotate)
def replace_annotations(request:HttpRequest, project_id=None) -> JsonResponse:
""" Replace a set of annotations for a list of target class instances by
removing a set of annotations and adding another one.
"""
to_remove = set(get_request_list(request.POST, 'to_remove', []))
to_add = set(get_request_list(request.POST, 'to_add', []))
target_ids = get_request_list(request.POST, 'target_ids', [], map_fn=int)
cursor = connection.cursor()
classes = get_class_to_id_map(project_id, ('annotation',), cursor)
id_map = dict(ClassInstance.objects.filter(name__in=to_remove.union(to_add),
project_id=project_id).values_list('name', 'id'))
deleted_total = 0
for a in to_remove:
annotation_id = id_map.get(a)
# Non-existent annotations don't need to be removed
if annotation_id is None:
continue
cicis_to_delete, missed_cicis, deleted, num_left = _remove_annotation(
request.user, project_id, target_ids, annotation_id)
deleted_total += deleted
to_add_map = {}
for a in to_add:
to_add_map[a] = {
'user_id': request.user.id,
}
annotations, new_annotations, existing_annotation = _annotate_entities(project_id,
target_ids, to_add_map)
return JsonResponse({
'n_linked_annotations': len(new_annotations) + len(existing_annotation),
'n_unlinked_annotations': deleted_total,
})
def create_annotation_query(project_id, param_dict):
classes = dict(Class.objects.filter(project_id=project_id).values_list('class_name', 'id'))
relations = dict(Relation.objects.filter(project_id=project_id).values_list('relation_name', 'id'))
annotation_query = ClassInstance.objects.filter(project_id=project_id,
class_column__id=classes['annotation'])
# Meta annotations are annotations that are used to annotate other
# annotations.
meta_annotations = [v for k,v in param_dict.items()
if k.startswith('annotations[')]
for meta_annotation in meta_annotations:
annotation_query = annotation_query.filter(
cici_via_b__relation_id = relations['annotated_with'],
cici_via_b__class_instance_a = meta_annotation)
# If information about annotated annotations is found, the current query
# will include only annotations that are meta annotations for it.
annotated_annotations = [v for k,v in param_dict.items()
if k.startswith('annotates[')]
for sub_annotation in annotated_annotations:
annotation_query = annotation_query.filter(
cici_via_a__relation_id = relations['annotated_with'],
cici_via_a__class_instance_b = sub_annotation)
# If parallel_annotations is given, only annotations that are used
# alongside these are returned.
parallel_annotations = [v for k,v in param_dict.items()
if k.startswith('parallel_annotations[')]
for p_annotation in parallel_annotations:
annotation_query = annotation_query.filter(
cici_via_b__class_instance_a__cici_via_a__relation_id = relations['annotated_with'],
cici_via_b__class_instance_a__cici_via_a__class_instance_b = p_annotation)
# Passing in a user ID causes the result set to only contain annotations
# that are used by the respective user. The query filter could lead to
# duplicate entries, therefore distinct() is added here.
user_id = param_dict.get('user_id', None)
if user_id:
user_id = int(user_id)
annotation_query = annotation_query.filter(
cici_via_b__user__id=user_id).distinct()
# With the help of the neuron_id field, it is possible to restrict the
# result set to only show annotations that are used for a particular neuron.
neuron_id = param_dict.get('neuron_id', None)
if neuron_id:
annotation_query = annotation_query.filter(
cici_via_b__relation_id = relations['annotated_with'],
cici_via_b__class_instance_a__id=neuron_id)
# Instead of a neuron, a user can also use a skeleton ID to constrain the
# annotation set returned. This is implicitly a neuron ID restriction.
skeleton_id = param_dict.get('skeleton_id', None)
if skeleton_id:
annotation_query = annotation_query.filter(
cici_via_b__relation_id = relations['annotated_with'],
cici_via_b__class_instance_a__cici_via_b__relation_id = relations['model_of'],
cici_via_b__class_instance_a__cici_via_b__class_instance_a__id = skeleton_id)
# If annotations to ignore are passed in, they won't appear in the
# result set.
ignored_annotations = [v for k,v in param_dict.items()
if k.startswith('ignored_annotations[')]
if ignored_annotations:
annotation_query = annotation_query.exclude(
name__in=ignored_annotations)
return annotation_query
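# Usage sketch (hypothetical IDs): <param_dict> uses indexed keys for list
# parameters, mirroring how the datatable view posts them, e.g.
#
#   qs = create_annotation_query(1, {
#       'annotations[0]': 17,            # meta-annotation constraint
#       'user_id': 5,                    # only annotations used by user 5
#       'ignored_annotations[0]': 'Name',
#   })
#
# returning a Django QuerySet of matching annotation class instances.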
def generate_co_annotation_query(project_id:Union[int,str], co_annotation_ids, classIDs, relationIDs) -> Tuple[str,str]:
if not co_annotation_ids:
raise ValueError("Need co-annotations")
tables = []
where = []
annotation_class = classIDs['annotation']
annotated_with = relationIDs['annotated_with']
for i, annotation_id in enumerate(co_annotation_ids):
tables.append("""
class_instance a%s,
class_instance_class_instance cc%s""" % (i, i))
where.append("""
AND a%s.project_id = %s
AND a%s.class_id = %s
AND cc%s.class_instance_a = neuron.id
AND cc%s.relation_id = %s
AND cc%s.class_instance_b = a%s.id
AND a%s.id = '%s'
""" % (i, project_id,
i, annotation_class,
i,
i, annotated_with,
i, i,
i, annotation_id))
select = """
SELECT DISTINCT
a.id,
a.name,
(SELECT username FROM auth_user, class_instance_class_instance cici
WHERE cici.class_instance_b = cc.id
AND cici.user_id = auth_user.id
ORDER BY cici.edition_time DESC LIMIT 1) AS "last_user",
(SELECT MAX(edition_time) FROM class_instance_class_instance cici WHERE cici.class_instance_b = a.id) AS "last_used",
(SELECT count(*) FROM class_instance_class_instance cici WHERE cici.class_instance_b = a.id) AS "num_usage"
"""
rest = """
FROM
class_instance a,
class_instance_class_instance cc,
class_instance neuron,
%s
WHERE
neuron.class_id = %s
AND a.class_id = %s
AND a.project_id = %s
AND cc.class_instance_a = neuron.id
AND cc.relation_id = %s
AND cc.class_instance_b = a.id
%s
""" % (',\n'.join(tables),
classIDs['neuron'],
annotation_class,
project_id,
annotated_with,
''.join(where))
return select, rest
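# Usage sketch: callers combine the two returned SQL fragments, as done in
# _fast_co_annotations() below:
#
#   select, rest = generate_co_annotation_query(1, [123], classIDs, relationIDs)
#   cursor.execute("SELECT count(DISTINCT a.id) " + rest)  # count variant
#   cursor.execute(select + rest)                          # full result rows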
@api_view(['GET', 'POST'])
@requires_user_role([UserRole.Annotate, UserRole.Browse])
def list_annotations(request:HttpRequest, project_id=None) -> JsonResponse:
"""List annotations matching filtering criteria that are currently in use.
The result set is the intersection of annotations matching criteria (the
criteria are conjunctive) unless stated otherwise.
---
parameters:
- name: annotations
description: A list of (meta) annotation IDs with which the resulting annotations should be annotated.
paramType: form
type: array
items:
type: integer
description: An annotation ID
- name: annotates
description: A list of entity IDs (like annotations and neurons) that should be annotated by the result set.
paramType: form
type: array
items:
type: integer
description: An entity ID
- name: parallel_annotations
description: A list of annotation IDs that have to be used alongside the result set.
paramType: form
type: array
items:
type: integer
description: An annotation ID
- name: user_id
description: Result annotations have to be used by this user.
paramType: form
type: integer
- name: neuron_id
description: Result annotations will annotate this neuron.
paramType: form
type: integer
- name: skeleton_id
description: Result annotations will annotate the neuron modeled by this skeleton.
paramType: form
type: integer
- name: ignored_annotations
description: A list of annotation names that will be excluded from the result set.
paramType: form
type: array
items:
type: string
- name: if_modified_since
description: |
Works only if <simple> is True. Return 304 response if there is no
newer content with respect to the passed in UTC date in ISO format.
paramType: form
type: string
models:
annotation_user_list_element:
id: annotation_user_list_element
properties:
id:
type: integer
name: id
description: The user id
required: true
name:
type: string
name: name
description: The user name
required: true
annotation_list_element:
id: annotation_list_element
description: Represents one annotation along with its users.
properties:
name:
type: string
description: The name of the annotation
required: true
id:
type: integer
description: The id of the annotation
required: true
users:
type: array
description: A list of users
required: true
items:
$ref: annotation_user_list_element
type:
- type: array
items:
$ref: annotation_list_element
required: true
"""
cursor = connection.cursor()
classes = get_class_to_id_map(project_id, ('annotation',), cursor)
# If there is no 'annotation' class, there can't be annotations
if 'annotation' not in classes:
return JsonResponse({'annotations': []})
if request.method == 'GET':
simple = get_request_bool(request.GET, 'simple', False)
relations = get_relation_to_id_map(project_id, ('annotated_with',), cursor)
if_modified_since = request.GET.get('if_modified_since')
# In case a simple representation should be returned, return a simple
# list of name - ID mappings.
if simple:
# If there is no newer annotation data since the passed-in date, return
# a 304 response.
if if_modified_since:
if_modified_since = dateutil.parser.parse(if_modified_since)
cursor.execute("""
SELECT EXISTS(
SELECT 1 FROM class_instance
WHERE edition_time > %(date)s
AND class_id = %(annotation_class_id)s
)
""", {
'date': if_modified_since,
'annotation_class_id': classes['annotation'],
})
new_data_exists = cursor.fetchone()[0]
if not new_data_exists:
return HttpResponse(status=304)
cursor.execute("""
SELECT row_to_json(wrapped)::text
FROM (
SELECT COALESCE(array_to_json(array_agg(row_to_json(annotation))), '[]'::json) AS annotations
FROM (
SELECT ci.id, ci.name
FROM class_instance ci
WHERE project_id = %(project_id)s
AND class_id = %(annotation_class_id)s
) annotation
) wrapped
""", {
'project_id': project_id,
'annotation_class_id': classes['annotation'],
})
annotation_json_text = cursor.fetchone()[0]
return HttpResponse(annotation_json_text, content_type='application/json')
cursor.execute('''
SELECT DISTINCT ON (ci.id, u.id) ci.name, ci.id, u.id, u.username
FROM class_instance ci
LEFT OUTER JOIN class_instance_class_instance cici
ON (ci.id = cici.class_instance_b)
LEFT OUTER JOIN auth_user u
ON (cici.user_id = u.id)
WHERE (ci.class_id = %s AND (cici.relation_id = %s OR cici.id IS NULL));
''',
(classes['annotation'], relations['annotated_with']))
annotation_tuples = cursor.fetchall()
elif request.method == 'POST':
annotation_query = create_annotation_query(project_id, request.POST)
annotation_tuples = annotation_query.distinct().values_list('name', 'id',
'cici_via_b__user__id', 'cici_via_b__user__username')
else:
raise ValueError("Unsupported HTTP method")
# Create maps from annotation IDs to annotation names and to their users
ids = {}
annotation_dict:Dict[Any, List[Dict]] = {}
for annotation, aid, uid, username in annotation_tuples:
ids[aid] = annotation
if aid not in annotation_dict: # With these two conditionals, we make sure an empty entry exists even if uid is None.
annotation_dict[aid] = []
if uid is not None:
annotation_dict[aid].append({'id': uid, 'name': username})
# Flatten dictionary to list
annotations = tuple({'name': ids[aid], 'id': aid, 'users': users} for aid, users in annotation_dict.items())
return JsonResponse({'annotations': annotations})
def _fast_co_annotations(request:HttpRequest, project_id:Union[int,str], display_start, display_length) -> JsonResponse:
classIDs = dict(Class.objects.filter(project_id=project_id).values_list('class_name', 'id'))
relationIDs = dict(Relation.objects.filter(project_id=project_id).values_list('relation_name', 'id'))
co_annotation_ids = set(get_request_list(request.POST, 'parallel_annotations', [], map_fn=int))
select, rest = generate_co_annotation_query(int(project_id), co_annotation_ids, classIDs, relationIDs)
entries = []
search_term = request.POST.get('search', '').strip()
if search_term:
rest += "\nAND a.name ~ %s" # django will escape and quote the string
entries.append(search_term)
# Sorting?
order = get_request_list(request.POST, 'order', default=[])
should_sort = len(order) > 0
sorter = ''
if should_sort:
column_count = len(order)
sorting_directions = [o[1] for o in order]
sorting_directions = list(map(lambda d: 'DESC' if d.upper() == 'DESC' else 'ASC',
sorting_directions))
fields = ['name', 'id', 'last_used', 'num_usage', 'last_user']
sorting_cols = [fields[int(o[0])] for o in order]
sorter = '\nORDER BY ' + ','.join('%s %s' % u for u in zip(sorting_cols, sorting_directions))
cursor = connection.cursor()
cursor.execute("SELECT count(DISTINCT a.id) " + rest, entries)
num_records = cursor.fetchone()[0]
response = {
'recordsTotal': num_records,
'recordsFiltered': num_records,
}
rest += sorter
rest += '\nLIMIT %s OFFSET %s'
entries.append(display_length) # total to return
entries.append(display_start) # offset
cursor.execute(select + rest, entries)
# 0: a.id
# 1: a.name
# 2: last_user
# 3: last_used
# 4: num_usage
data = []
for row in cursor.fetchall():
last_used = row[3]
if last_used:
last_used = last_used.strftime("%Y-%m-%d %H:%M:%S")
else:
last_used = 'never'
data.append([row[1], # Annotation name
last_used, # Last used
row[4], # Number of usages
row[2], # Last annotator
row[0]]) # Annotation ID
response['data'] = data
return JsonResponse(response)
@requires_user_role([UserRole.Browse])
def list_annotations_datatable(request:HttpRequest, project_id=None) -> JsonResponse:
display_start = int(request.POST.get('start', 0))
display_length = int(request.POST.get('length', -1))
if display_length < 0:
display_length = 2000 # Default number of result rows
# Speed hack
if 'parallel_annotations[0]' in request.POST:
return _fast_co_annotations(request, project_id, display_start, display_length)
annotation_query = create_annotation_query(project_id, request.POST)
order = get_request_list(request.POST, 'order', default=[])
should_sort = len(order) > 0
search_term = request.POST.get('search', '')
# Additional information should also be constrained by neurons and user
# names. E.g., when viewing the annotation list for a user, the usage count
# should only display the number of times the user has used an annotation.
conditions = ""
# Cast to int to guard the string interpolation below against injection.
if request.POST.get('neuron_id'):
conditions += "AND cici.class_instance_a = %s " % \
int(request.POST.get('neuron_id'))
if request.POST.get('user_id'):
conditions += "AND cici.user_id = %s " % \
int(request.POST.get('user_id'))
# Add (last) annotated on time
annotation_query = annotation_query.extra(
select={'annotated_on': 'SELECT MAX(cici.creation_time) FROM ' \
'class_instance_class_instance cici WHERE ' \
'cici.class_instance_b = class_instance.id %s' % conditions})
# Add user ID of last user
annotation_query = annotation_query.extra(
select={'last_user': 'SELECT auth_user.id FROM auth_user, ' \
'class_instance_class_instance cici ' \
'WHERE cici.class_instance_b = class_instance.id ' \
'AND cici.user_id = auth_user.id %s' \
'ORDER BY cici.edition_time DESC LIMIT 1' % conditions})
# Add usage count
annotation_query = annotation_query.extra(
select={'num_usage': 'SELECT COUNT(*) FROM ' \
'class_instance_class_instance cici WHERE ' \
'cici.class_instance_b = class_instance.id %s' % conditions})
if len(search_term) > 0:
annotation_query = annotation_query.filter(name__iregex=search_term)
if should_sort:
column_count = len(order)
sorting_directions = [o[1] for o in order]
sorting_directions = list(map(lambda d: '-' if d.upper() == 'DESC' else '',
sorting_directions))
fields = ['name', 'id', 'annotated_on', 'num_usage', 'last_user']
sorting_cols = [fields[int(o[0])] for o in order]
annotation_query = annotation_query.extra(order_by=[di + col for (di, col) in zip(
sorting_directions, sorting_cols)])
# We only require ID, name, last used and usage number
annotation_query = annotation_query.values_list(
'id', 'name', 'annotated_on', 'num_usage', 'last_user')
# Make sure we get a distinct result (which otherwise might not be the case
# due to the JOINS that are made).
annotation_query = annotation_query.distinct()
num_records = len(annotation_query)
response:Dict[str, Any] = {
'recordsTotal': num_records,
'recordsFiltered': num_records,
'data': []
}
for annotation in annotation_query[display_start:display_start + display_length]:
# Format last used time
if annotation[2]:
annotated_on = annotation[2].isoformat()
else:
annotated_on = 'never'
# Build datatable data structure
response['data'].append([
annotation[1], # Name
annotated_on, # Annotated on
annotation[3], # Usage
annotation[4], # Annotator ID
annotation[0]]) # ID
return JsonResponse(response)
@api_view(['POST'])
@requires_user_role([UserRole.Browse])
def annotations_for_skeletons(request:HttpRequest, project_id=None) -> JsonResponse:
"""Get annotations and who used them for a set of skeletons.
This method focuses only on annotations linked to skeletons and is likely to
be faster than the general query. Returns an object with two fields:
"annotations", which is itself an object with annotation IDs as fields,
giving access to the corresponding annotation names. And the field
"skeletons" is also an object, mapping skeleton IDs to lists of
    annotation-annotator ID pairs. The response is serialized with compact
    JSON separators (no whitespace after commas and colons).
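    For instance (illustrative IDs)::
        { "annotations": { "1": "example1", "3": "example2" },
          "skeletons": { "42": [{"id": 1, "uid": 12}, {"id": 3, "uid": 14}] } }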
---
parameters:
- name: skeleton_ids
description: A list of skeleton IDs which are annotated by the resulting annotations.
paramType: form
type: array
items:
type: integer
description: A skeleton ID
"""
    skids = tuple(get_request_list(request.POST, 'skeleton_ids', [], map_fn=int))
    if not skids:
        # Return early: an empty IN () clause would be a SQL syntax error.
        return JsonResponse({'skeletons': {}, 'annotations': {}},
                json_dumps_params={'separators': (',', ':')})
    cursor = connection.cursor()
    cursor.execute("""
        SELECT id FROM relation
        WHERE project_id = %s AND relation_name = 'annotated_with'
    """, (int(project_id),))
annotated_with_id = cursor.fetchone()[0]
    # Select tuples of skeleton ID, annotation ID and name, and annotator ID
cursor.execute('''
SELECT skeleton_neuron.class_instance_a,
annotation.id, annotation.name, neuron_annotation.user_id
FROM class_instance_class_instance skeleton_neuron,
class_instance_class_instance neuron_annotation,
class_instance annotation
WHERE skeleton_neuron.class_instance_a IN (%s)
AND skeleton_neuron.class_instance_b = neuron_annotation.class_instance_a
AND neuron_annotation.relation_id = %s
AND neuron_annotation.class_instance_b = annotation.id
''' % (",".join(map(str, skids)), annotated_with_id))
# Group by skeleton ID
m:DefaultDict[Any, List] = defaultdict(list)
a = dict()
for skid, aid, name, uid in cursor.fetchall():
m[skid].append({'id': aid, 'uid': uid})
a[aid] = name
return JsonResponse({
'skeletons': m,
'annotations': a
}, json_dumps_params={'separators': (',', ':')})
@api_view(['POST'])
@requires_user_role([UserRole.Browse])
def annotations_for_entities(request:HttpRequest, project_id=None) -> JsonResponse:
"""Query annotations linked to a list of objects.
These objects can for instance be neurons, annotations or stack groups. From
a database perspective, these objects are class instances.
Returned is an object with the fields "entities" and "annotations". The
former is an object mapping an entity ID to a list of annotations. Each
annotation is represented by an object containing its "id" and "uid", the
user who annotated it. The latter maps annotation IDs to annotation names.
For instance::
{ "entities": { "42": [{id: 1, uid: 12}, {id: 3, uid: 14}] }, "annotations": { 12: "example1", 14: "example2" } }
---
parameters:
- name: object_ids
description: A list of object IDs for which annotations should be returned.
paramType: form
type: array
allowMultiple: true
items:
type: integer
          description: An object ID
"""
    object_ids = tuple(get_request_list(request.POST, 'object_ids', [], map_fn=int))
entity_map, annotation_map = get_annotations_for_entities(project_id, object_ids)
return JsonResponse({
'entities': entity_map,
'annotations': annotation_map
}, json_dumps_params={'separators': (',', ':')})
def get_annotations_for_entities(project_id, object_ids):
    if not object_ids:
        # An empty IN () clause would be a SQL syntax error.
        return {}, {}
    cursor = connection.cursor()
    cursor.execute("""
        SELECT id FROM relation
        WHERE project_id = %s AND
        relation_name = 'annotated_with'""", (int(project_id),))
annotated_with_id = cursor.fetchone()[0]
    # Select tuples of entity ID, annotation ID and name, and annotator ID
cursor.execute('''
SELECT entity_annotation.class_instance_a,
annotation.id, annotation.name, entity_annotation.user_id
FROM class_instance_class_instance entity_annotation,
class_instance annotation
WHERE entity_annotation.class_instance_a IN (%s)
AND entity_annotation.relation_id = %s
AND entity_annotation.class_instance_b = annotation.id
''' % (",".join(map(str, object_ids)), annotated_with_id))
# Group by entity ID
entities:DefaultDict[Any, List] = defaultdict(list)
annotations = dict()
for eid, aid, name, uid in cursor.fetchall():
entities[eid].append({'id': aid, 'uid': uid})
annotations[aid] = name
return entities, annotations
def annotations_for_skeleton(project_id:Union[int,str], skeleton_id, relations=None, classes=None) -> Dict:
"""Get a a dictionary mapping annotations on the neuron modeled by the
passed in skeleton to the respective annotators.
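    For instance (illustrative values)::
        { "my annotation": 12, "another annotation": 14 }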
"""
if not relations:
relations = get_relation_to_id_map(project_id)
if not classes:
classes = get_class_to_id_map(project_id)
cursor = connection.cursor()
cursor.execute("""
SELECT a.name, cici.user_id
FROM class_instance a
JOIN class_instance_class_instance cici
ON a.id = cici.class_instance_b
JOIN class_instance neuron
ON neuron.id = cici.class_instance_a
JOIN class_instance_class_instance skeleton_neuron
ON cici.class_instance_a = skeleton_neuron.class_instance_b
JOIN class_instance skeleton
ON skeleton.id = skeleton_neuron.class_instance_a
WHERE cici.project_id = %(project_id)s
AND a.class_id = %(annotation_class)s
AND cici.relation_id = %(annotated_with_rel)s
AND neuron.class_id = %(neuron_class)s
AND skeleton_neuron.relation_id = %(model_of_rel)s
AND skeleton_neuron.class_instance_a = %(skeleton_id)s
""", {
'project_id': project_id,
'annotation_class': classes['annotation'],
'annotated_with_rel': relations['annotated_with'],
'neuron_class': classes['neuron'],
'model_of_rel': relations['model_of'],
'skeleton_id': skeleton_id,
})
return dict(cursor.fetchall())
def clear_annotations(project_id:Union[int,str], skeleton_id, relations=None, classes=None) -> List[int]:
"""Remove all annotations from a skeleton.
"""
if not relations:
relations = get_relation_to_id_map(project_id)
if not classes:
classes = get_class_to_id_map(project_id)
cursor = connection.cursor()
cursor.execute("""
DELETE FROM class_instance_class_instance cici
USING class_instance a, class_instance neuron,
class_instance_class_instance skeleton_neuron,
class_instance skeleton
WHERE a.id = cici.class_instance_b
AND neuron.id = cici.class_instance_a
AND cici.class_instance_a = skeleton_neuron.class_instance_b
AND skeleton.id = skeleton_neuron.class_instance_a
AND cici.project_id = %(project_id)s
AND a.class_id = %(annotation_class)s
AND cici.relation_id = %(annotated_with_rel)s
AND neuron.class_id = %(neuron_class)s
AND skeleton_neuron.relation_id = %(model_of_rel)s
AND skeleton_neuron.class_instance_a = %(skeleton_id)s
RETURNING cici.id
""", {
'project_id': project_id,
'annotation_class': classes['annotation'],
'annotated_with_rel': relations['annotated_with'],
'neuron_class': classes['neuron'],
'model_of_rel': relations['model_of'],
'skeleton_id': skeleton_id,
})
    return [r[0] for r in cursor.fetchall()]
|
catmaid/CATMAID
|
django/applications/catmaid/control/annotation.py
|
Python
|
gpl-3.0
| 76,984
|
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('zerver', '0111_botuserstatedata'),
]
operations = [
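        # The functional index on (stream_id, upper(topic_name)) supports
        # case-insensitive lookups of muted topics within a stream.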
migrations.RunSQL(
'''
CREATE INDEX zerver_mutedtopic_stream_topic
ON zerver_mutedtopic
(stream_id, upper(topic_name))
''',
reverse_sql='DROP INDEX zerver_mutedtopic_stream_topic;',
),
]
|
brainwane/zulip
|
zerver/migrations/0112_index_muted_topics.py
|
Python
|
apache-2.0
| 446
|
from tests.support.asserts import assert_success
from . import opener, window_name
def new_window(session, type_hint=None):
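    # Send the WebDriver "New Window" command; the type hint requests a
    # window rather than a tab, though the endpoint may ignore the hint.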
return session.transport.send(
"POST", "session/{session_id}/window/new".format(**vars(session)),
{"type": type_hint})
def test_payload(session):
original_handles = session.handles
response = new_window(session, type_hint="window")
value = assert_success(response)
handles = session.handles
assert len(handles) == len(original_handles) + 1
assert value["handle"] in handles
assert value["handle"] not in original_handles
assert value["type"] == "window"
def test_keeps_current_window_handle(session):
original_handle = session.window_handle
response = new_window(session, type_hint="window")
value = assert_success(response)
assert value["type"] == "window"
assert session.window_handle == original_handle
def test_opens_about_blank_in_new_window(session, inline):
url = inline("<p>foo")
session.url = url
response = new_window(session, type_hint="window")
value = assert_success(response)
assert value["type"] == "window"
assert session.url == url
session.window_handle = value["handle"]
assert session.url == "about:blank"
def test_sets_no_window_name(session):
response = new_window(session, type_hint="window")
value = assert_success(response)
assert value["type"] == "window"
session.window_handle = value["handle"]
assert window_name(session) == ""
def test_sets_no_opener(session):
response = new_window(session, type_hint="window")
value = assert_success(response)
assert value["type"] == "window"
session.window_handle = value["handle"]
assert opener(session) is None
|
scheib/chromium
|
third_party/blink/web_tests/external/wpt/webdriver/tests/new_window/new_window.py
|
Python
|
bsd-3-clause
| 1,766
|
# tests.test_text.test_freqdist
# Tests for the frequency distribution visualization
#
# Author: Rebecca Bilbro
# Created: 2017-03-22 15:27
#
# Copyright (C) 2018 The scikit-yb developers
# For license information, see LICENSE.txt
#
# ID: test_freqdist.py [bd9cbb9] $
"""
Tests for the frequency distribution text visualization
"""
##########################################################################
## Imports
##########################################################################
import pytest
from yellowbrick.datasets import load_hobbies
from yellowbrick.text.freqdist import *
from tests.base import IS_WINDOWS_OR_CONDA, VisualTestCase
from sklearn.feature_extraction.text import CountVectorizer
##########################################################################
## Data
##########################################################################
corpus = load_hobbies()
##########################################################################
## FreqDist Tests
##########################################################################
class FreqDistTests(VisualTestCase):
@pytest.mark.xfail(
IS_WINDOWS_OR_CONDA,
reason="font rendering different in OS and/or Python; see #892"
)
def test_integrated_freqdist(self):
"""
Assert no errors occur during freqdist integration
"""
vectorizer = CountVectorizer()
docs = vectorizer.fit_transform(corpus.data)
features = vectorizer.get_feature_names()
visualizer = FreqDistVisualizer(features)
visualizer.fit(docs)
visualizer.finalize()
self.assert_images_similar(visualizer)
|
pdamodaran/yellowbrick
|
tests/test_text/test_freqdist.py
|
Python
|
apache-2.0
| 1,675
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name' : 'Marble Works',
'version' : '1.0',
'author' : 'Antares Consulting',
    'description': 'Product definitions for marble works.',
'category' : 'Product Marble',
'website' : 'http://www.antaresconsulting.com.ar',
'depends' :[
'web',
'stock',
'sale',
'purchase',
'hr',
'l10n_ar_base',
'l10n_ar_base_vat',
],
'data' :[
'data/heavy_data.xml',
'data/users_data.xml',
'data/products_data.xml',
'data/locations_data.xml',
'data/picking_data.xml',
'security/groups_security.xml',
'security/users_security.xml',
'security/ir.model.access.csv',
'views/marble_login_view.xml',
'views/product_dimension_view.xml',
'views/product_view.xml',
'views/res_partner_view.xml',
'views/stock_view.xml',
'views/stock_change_product_qty_view.xml',
'views/stock_transfer_details.xml',
'views/hide_fields_view.xml',
'views/marble_actions.xml',
'views/marble_menu.xml',
],
'css': [
'static/src/css/style.css',
],
'auto_install':False,
'installable': True,
'active' : True,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
AntaresConsulting/odoo-marble
|
product_marble/__openerp__.py
|
Python
|
gpl-2.0
| 2,342
|
# Copyright (c) 2018, Intel Corporation.
# SPDX-License-Identifier: BSD-3-Clause
# Create the ixp_default.conf file used by installer based on the ixp_default.h
import argparse
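# Tokens stripped from each line: double quotes and literal "\n" escapes
# carried over from the C header's string constants.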
delete_list = ["\"", "\\n"]
parser = argparse.ArgumentParser(description='The default ini conf file generator.')
parser.add_argument('src_file', help='input file name')
parser.add_argument('dest_file', help='output file name')
args = parser.parse_args()
with open(args.src_file, 'r') as infile, open(args.dest_file, 'w') as outfile:
    for line in infile:
        if line.rstrip():
            for word in delete_list:
                line = line.replace(word, "")
            outfile.write(line)
|
intel/ipmctl
|
src/os/ini/ini_auto_gen_default_config.py
|
Python
|
bsd-3-clause
| 682
|
"""
check issue #8
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
__all__ = ['main']
import sys
import traceback
import signal
import time
import platform
import random
import pyownet
from pyownet import protocol
from . import (HOST, PORT)
A = 1e-3 # alarm interval
B = 1e-1 # max sleep in busy wait
def dummy_handler(signum, frame):
"""dummy signal handler"""
def main():
print(platform.python_implementation(), platform.python_version())
print(platform.system(), platform.release())
print(pyownet.__name__, pyownet.__version__)
owp = protocol.proxy(HOST, PORT, flags=protocol.FLG_UNCACHED)
print(owp, 'vers.',
protocol.bytes2str(owp.read(protocol.PTH_VERSION))
if owp.present(protocol.PTH_VERSION) else 'unknown')
signal.signal(signal.SIGALRM, dummy_handler)
tic = time.time()
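    # Arm a periodic SIGALRM every A seconds so that blocking owserver
    # calls in the loop below are likely to be interrupted (EINTR).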
signal.setitimer(signal.ITIMER_REAL, A, A)
try:
count = 0
inter = 0
while count < 10000:
count += 1
try:
_ = owp.dir()
except protocol.Error as exc:
(_, val, tb) = sys.exc_info()
assert val is exc
inter += 1
trs = traceback.extract_tb(tb)
print(count, exc, trs[-1][0], trs[-1][1])
time.sleep(random.uniform(0., B)) # can be interrupted
except KeyboardInterrupt:
print()
signal.setitimer(signal.ITIMER_REAL, 0, A)
elt = time.time() - tic
print('{:d} errors / {:d} calls in {:.1f}s'.format(inter, count, elt))
if __name__ == '__main__':
main()
|
miccoli/pyownet
|
tests/eintr.py
|
Python
|
lgpl-3.0
| 1,660
|
import os
from urllib.parse import urlparse
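# 12-factor style configuration: a single DATABASE_URL environment variable
# is parsed into the discrete settings used by the application.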
default_db = 'postgres://user:pass@127.0.0.1:5432/dbname'
DATABASE_URL = os.environ.get('DATABASE_URL', default_db)
url = urlparse(DATABASE_URL)
DATABASE_HOST = '{}:{}'.format(url.hostname, url.port)
DATABASE_NAME = url.path[1:]
DATABASE_USERNAME = url.username
DATABASE_PASSWORD = url.password
|
itkpi/events-storage
|
events_service/settings.py
|
Python
|
apache-2.0
| 342
|
#!/usr/bin/env python
from unittest import TestCase
from fix_task.humans import Human, Boy, Girl
from fix_task.dances import HipHop, Pop, Electrodance
from fix_task.songs import Song
__author__ = 'litleleprikon'
class TestHuman(TestCase):
def setUp(self):
self.human = Human(name="Emil", dances=(HipHop(), Electrodance()))
def test_init(self):
assert self.human.name == "Emil"
assert isinstance(self.human._dances, tuple) and len(self.human._dances) == 2
def test_can_dance_method(self):
assert self.human.can_dance(Song(genre=HipHop()))
assert not self.human.can_dance(Song(genre=Pop()))
def test_new_song_method(self):
self.human.new_song_started(Song(name="Pretty song", genre=HipHop()))
assert self.human.state == "Dance"
def test_state(self):
assert self.human.state == "Go to bar"
def test_str_method(self):
format_string = "Human {}, status: {}, can dance: {}"
assert str(self.human) == format_string.format(self.human.name, self.human.state,
', '.join(x.name for x in self.human._dances))
def test_boy_str_method(self):
boy = Boy()
assert str(boy) == "Boy {}, status: {}, can dance: {}".format(boy.name, boy.state,
', '.join(x.name for x in boy._dances))
def test_girl_str_method(self):
girl = Girl()
assert str(girl) == "Girl {}, status: {}, can dance: {}".format(girl.name, girl.state,
', '.join(x.name for x in girl._dances))
|
litleleprikon/FIXTestTask
|
tests/test_human.py
|
Python
|
apache-2.0
| 1,688
|
from datetime import datetime
class GKReview:
# TODO add platform(s)
def __init__(self, reviewer="default", title="default", review_link="default", rating=-1, date="17/07/1994"):
self.reviewer = reviewer
self.title = title
self.link = review_link
self.rating = int(rating)
self.date = datetime.strptime(date, '%d/%m/%Y')
self.content = ""
def print_review(self):
print(self.title, " ", self.rating, " by ", self.reviewer)
def get_year(self):
return self.date.year
def get_rating(self):
return self.rating
def get_reviewer(self):
return self.reviewer
def get_metric(self, metric):
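        # Dispatch on the requested metric name; unknown metrics fall back
        # to the numeric rating.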
if metric == 'rating':
return self.rating
elif metric == 'length':
return len(self.content)
elif metric == 'wordcount': # Simple implementation
return len(self.content.split(" "))
else:
return self.rating
|
arthurdk/gk-analysis
|
GKReview.py
|
Python
|
mit
| 981
|
#!/usr/bin/python
from distutils.core import setup
setup(name='kodicmd',
description='Kodi command-line interface',
author='Tobias D. Oestreicher',
author_email='lists@oestreicher.com.de',
url='https://github.com/tobias-d-oe/kodicmd',
version='0.0.1',
packages=['kodicmd'],
scripts=['kodicmd/kodicmd']
)
|
tobias-d-oe/kodicmd
|
setup.py
|
Python
|
gpl-3.0
| 349
|
from flask import Blueprint
from flask import make_response, Markup, send_from_directory, current_app
from flask import request, redirect, url_for, render_template, g, abort
from werkzeug.http import http_date, generate_etag
from redwind import imageproxy
from redwind import util
from redwind.extensions import db
from redwind.models import Post, Tag, get_settings
import datetime
import flask.ext.login as flask_login
import json
import os
import pytz
import re
import sqlalchemy
import sqlalchemy.orm
import sqlalchemy.sql
import urllib.parse
TIMEZONE = pytz.timezone('US/Pacific')
POST_TYPES = [
('article', 'articles', 'All Articles'),
('note', 'notes', 'All Notes'),
('like', 'likes', 'All Likes'),
('share', 'shares', 'All Shares'),
('reply', 'replies', 'All Replies'),
('checkin', 'checkins', 'All Check-ins'),
('photo', 'photos', 'All Photos'),
('bookmark', 'bookmarks', 'All Bookmarks'),
('event', 'events', 'All Events'),
]
POST_TYPE_RULE = '<any({}):post_type>'.format(
','.join(tup[0] for tup in POST_TYPES))
PLURAL_TYPE_RULE = '<any({}):plural_type>'.format(
','.join(tup[1] for tup in POST_TYPES))
DATE_RULE = (
'<int:year>/<int(fixed_digits=2):month>/<int(fixed_digits=2):day>/<index>')
BEFORE_TS_FORMAT = '%Y%m%d%H%M%S'
AUTHOR_PLACEHOLDER = 'img/users/placeholder.png'
views = Blueprint('views', __name__)
@views.context_processor
def inject_settings_variable():
return {
'settings': get_settings()
}
def collect_posts(post_types, before_ts, per_page, tag, search=None,
include_hidden=False):
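    """Query posts filtered by type, tag, audience and full-text search,
    using a before-timestamp cursor for pagination. Returns (posts, older),
    where older is the URL of the next (older) page or None.
    """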
query = Post.query
query = query.options(
sqlalchemy.orm.subqueryload(Post.tags),
sqlalchemy.orm.subqueryload(Post.mentions),
sqlalchemy.orm.subqueryload(Post.reply_contexts),
sqlalchemy.orm.subqueryload(Post.repost_contexts),
sqlalchemy.orm.subqueryload(Post.like_contexts),
sqlalchemy.orm.subqueryload(Post.bookmark_contexts))
if tag:
query = query.filter(Post.tags.any(Tag.name == tag))
if not include_hidden:
query = query.filter_by(hidden=False)
query = query.filter_by(deleted=False, draft=False)
if post_types:
query = query.filter(Post.post_type.in_(post_types))
if search:
query = query.filter(
sqlalchemy.func.concat(Post.title, ' ', Post.content)
.op('@@')(sqlalchemy.func.plainto_tsquery(search)))
try:
if before_ts:
before_dt = datetime.datetime.strptime(before_ts, BEFORE_TS_FORMAT)
before_dt = TIMEZONE.normalize(TIMEZONE.localize(before_dt))
before_dt = before_dt.astimezone(pytz.utc)
before_dt = before_dt.replace(tzinfo=None)
query = query.filter(Post.published < before_dt)
except ValueError:
current_app.logger.warn('Could not parse before timestamp: %s',
before_ts)
query = query.order_by(Post.published.desc())
query = query.limit(per_page)
posts = query.all()
posts = [post for post in posts if check_audience(post)]
if posts:
last_ts = posts[-1].published
last_ts = pytz.utc.localize(last_ts)
last_ts = TIMEZONE.normalize(last_ts.astimezone(TIMEZONE))\
.replace(tzinfo=None)
view_args = request.view_args.copy()
view_args['before_ts'] = last_ts.strftime(BEFORE_TS_FORMAT)
for k, v in request.args.items():
view_args[k] = v
older = url_for(request.endpoint, **view_args)
else:
older = None
return posts, older
def collect_upcoming_events():
now = datetime.datetime.utcnow()
events = Post.query\
.filter(Post.post_type == 'event')\
.filter(Post.end_utc > now.isoformat('T'))\
.order_by(Post.start_utc)\
.all()
return events
# Font sizes in em. Maybe should be configurable
MIN_TAG_SIZE = 1.0
MAX_TAG_SIZE = 4.0
MIN_TAG_COUNT = 2
def render_tags(title, tags):
if tags:
counts = [tag['count'] for tag in tags]
mincount, maxcount = min(counts), max(counts)
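        # Interpolate each tag's size linearly between MIN_TAG_SIZE and
        # MAX_TAG_SIZE according to where its count falls in
        # [mincount, maxcount].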
for tag in tags:
if maxcount > mincount:
tag['size'] = (MIN_TAG_SIZE +
(MAX_TAG_SIZE - MIN_TAG_SIZE) *
(tag['count'] - mincount) /
(maxcount - mincount))
else:
tag['size'] = MIN_TAG_SIZE
return util.render_themed('tags.jinja2', tags=tags, title=title,
max_tag_size=MAX_TAG_SIZE)
def render_posts(title, posts, older, events=None, template='posts.jinja2'):
atom_args = request.view_args.copy()
atom_args.update({'feed': 'atom', '_external': True})
atom_url = url_for(request.endpoint, **atom_args)
atom_title = title or 'Stream'
rv = make_response(
util.render_themed(template, posts=posts, title=title,
older=older, atom_url=atom_url,
atom_title=atom_title, events=events))
last_modified = max((p.updated for p in posts if p.updated), default=None)
if last_modified:
rv.headers['Last-Modified'] = http_date(last_modified)
rv.headers['Etag'] = generate_etag(rv.get_data())
rv.make_conditional(request)
return rv
def render_posts_atom(title, feed_id, posts):
rv = make_response(
render_template('posts.atom', title=title, feed_id=feed_id,
posts=posts))
rv.headers['Content-Type'] = 'application/atom+xml; charset=utf-8'
last_modified = max((p.updated for p in posts if p.updated), default=None)
if last_modified:
rv.headers['Last-Modified'] = http_date(last_modified)
rv.headers['Etag'] = generate_etag(rv.get_data())
rv.make_conditional(request)
return rv
@views.route('/')
@views.route('/before-<before_ts>')
def index(before_ts=None):
post_types = [type[0] for type in POST_TYPES if type[0] != 'event']
posts, older = collect_posts(
post_types, before_ts, int(get_settings().posts_per_page),
None, include_hidden=False)
if request.args.get('feed') == 'atom':
return render_posts_atom('Stream', 'index.atom', posts)
resp = make_response(
render_posts('Stream', posts, older,
events=collect_upcoming_events(),
template='home.jinja2'))
if 'PUSH_HUB' in current_app.config:
resp.headers.add('Link', '<{}>; rel="hub"'.format(
current_app.config['PUSH_HUB']))
resp.headers.add('Link', '<{}>; rel="self"'.format(
url_for('.index', _external=True)))
return resp
@views.route('/everything')
@views.route('/everything/before-<before_ts>')
def everything(before_ts=None):
posts, older = collect_posts(
None, before_ts, int(get_settings().posts_per_page), None,
include_hidden=True)
if request.args.get('feed') == 'atom':
return render_posts_atom('Everything', 'everything.atom', posts)
return render_posts('Everything', posts, older)
@views.route('/' + PLURAL_TYPE_RULE)
@views.route('/' + PLURAL_TYPE_RULE + '/before-<before_ts>')
def posts_by_type(plural_type, before_ts=None):
post_type, _, title = next(tup for tup in POST_TYPES
if tup[1] == plural_type)
posts, older = collect_posts(
(post_type,), before_ts, int(get_settings().posts_per_page), None,
include_hidden=True)
if request.args.get('feed') == 'atom':
return render_posts_atom(title, plural_type + '.atom', posts)
return render_posts(title, posts, older)
@views.route('/tags')
def tag_cloud():
query = db.session.query(
Tag.name, sqlalchemy.func.count(Post.id)
).join(Tag.posts)
query = query.filter(sqlalchemy.sql.expression.not_(Post.deleted))
if not flask_login.current_user.is_authenticated():
query = query.filter(sqlalchemy.sql.expression.not_(Post.draft))
query = query.group_by(Tag.id).order_by(Tag.name)
query = query.having(sqlalchemy.func.count(Post.id) >= MIN_TAG_COUNT)
tagdict = {}
for name, count in query.all():
tagdict[name] = tagdict.get(name, 0) + count
tags = [
{"name": name, "count": tagdict[name]}
for name in sorted(tagdict)
]
return render_tags("Tags", tags)
@views.route('/tags/<tag>')
@views.route('/tags/<tag>/before-<before_ts>')
def posts_by_tag(tag, before_ts=None):
posts, older = collect_posts(
None, before_ts, int(get_settings().posts_per_page), tag,
include_hidden=True)
title = '#' + tag
if request.args.get('feed') == 'atom':
return render_posts_atom(title, 'tag-' + tag + '.atom', posts)
return render_posts(title, posts, older)
@views.route('/search')
@views.route('/search/before-<before_ts>')
def search(before_ts=None):
q = request.args.get('q')
if not q:
abort(404)
posts, older = collect_posts(
None, before_ts, int(get_settings().posts_per_page), None,
include_hidden=True, search=q)
return render_posts('Search: ' + q, posts, older)
@views.route('/all.atom')
def all_atom():
return redirect(url_for('.everything', feed='atom'))
@views.route('/updates.atom')
def updates_atom():
return redirect(url_for('.index', feed='atom'))
@views.route('/articles.atom')
def articles_atom():
return redirect(
url_for('.posts_by_type', plural_type='articles', feed='atom'))
def check_audience(post):
if not post.audience:
# all posts public by default
return True
if flask_login.current_user.is_authenticated():
# admin user can see everything
return True
if flask_login.current_user.is_anonymous():
# anonymous users can't see stuff
return False
# check that their username is listed in the post's audience
current_app.logger.debug(
'checking that logged in user %s is in post audience %s',
flask_login.current_user.get_id(), post.audience)
return flask_login.current_user.get_id() in post.audience
@views.route('/' + POST_TYPE_RULE + '/' + DATE_RULE + '/files/<filename>')
def post_associated_file_by_historic_path(post_type, year, month, day,
index, filename):
post = Post.load_by_historic_path('{}/{}/{:02d}/{:02d}/{}'.format(
post_type, year, month, day, index))
if not post:
abort(404)
return redirect('/{}/files/{}'.format(post.path, filename))
@views.route('/<int:year>/<int(fixed_digits=2):month>/<slug>/files/<filename>')
def post_attachment(year, month, slug, filename):
post = Post.load_by_path('{}/{:02d}/{}'.format(year, month, slug))
return render_attachment(post, filename)
@views.route('/drafts/<hash>/files/<filename>')
def draft_attachment(hash, filename):
post = Post.load_by_path('drafts/{}'.format(hash))
return render_attachment(post, filename)
def render_attachment(post, filename):
if not post:
current_app.logger.warn('no post found')
abort(404)
if post.deleted:
abort(410) # deleted permanently
if not check_audience(post):
abort(401) # not authorized TODO a nicer page
attachment = next(
(a for a in post.attachments if a.filename == filename), None)
if not attachment:
current_app.logger.warn('no attachment named %s', filename)
abort(404)
current_app.logger.debug('image file path: %s. request args: %s',
attachment.disk_path, request.args)
if not os.path.exists(attachment.disk_path):
current_app.logger.warn('source path does not exist %s',
attachment.disk_path)
abort(404)
if current_app.debug:
_, ext = os.path.splitext(attachment.disk_path)
return send_from_directory(
os.path.dirname(attachment.disk_path),
os.path.basename(attachment.disk_path),
mimetype=attachment.mimetype)
resp = make_response('')
# nginx is configured to serve internal resources directly
resp.headers['X-Accel-Redirect'] = os.path.join(
'/internal_data', attachment.storage_path)
resp.headers['Content-Type'] = attachment.mimetype
del resp.headers['Content-Length']
current_app.logger.debug('response with X-Accel-Redirect %s', resp.headers)
return resp
@views.route('/' + POST_TYPE_RULE + '/' + DATE_RULE, defaults={'slug': None})
@views.route('/' + POST_TYPE_RULE + '/' + DATE_RULE + '/<slug>')
def post_by_date(post_type, year, month, day, index, slug):
post = Post.load_by_historic_path('{}/{}/{:02d}/{:02d}/{}'.format(
post_type, year, month, day, index))
if not post:
abort(404)
return redirect(post.permalink)
@views.route('/<any({}):tag>/<tail>'.format(','.join(util.TAG_TO_TYPE)))
def post_by_short_path(tag, tail):
post = Post.load_by_short_path('{}/{}'.format(tag, tail))
if not post:
abort(404)
return redirect(post.permalink)
@views.route('/<int:year>/<int(fixed_digits=2):month>/<slug>')
def post_by_path(year, month, slug):
post = Post.load_by_path('{}/{:02d}/{}'.format(year, month, slug))
return render_post(post)
@views.route('/drafts/<hash>')
def draft_by_hash(hash):
post = Post.load_by_path('drafts/{}'.format(hash))
return render_post(post)
def render_post(post):
if not post:
abort(404)
if post.deleted:
abort(410) # deleted permanently
if not check_audience(post):
abort(401) # not authorized TODO a nicer page
if post.redirect:
return redirect(post.redirect)
rv = make_response(
util.render_themed('post.jinja2', post=post,
title=post.title_or_fallback))
if post.updated:
rv.headers['Last-Modified'] = http_date(post.updated)
rv.headers['Etag'] = generate_etag(rv.get_data())
rv.make_conditional(request)
return rv
@views.app_template_filter('json')
def to_json(obj):
return Markup(json.dumps(obj))
@views.app_template_filter('approximate_latitude')
def approximate_latitude(loc):
latitude = loc.get('latitude')
if latitude:
return '{:.3f}'.format(latitude)
@views.app_template_filter('approximate_longitude')
def approximate_longitude(loc):
longitude = loc.get('longitude')
return longitude and '{:.3f}'.format(longitude)
@views.app_template_filter('geo_name')
def geo_name(loc):
name = loc.get('name')
if name:
return name
locality = loc.get('locality')
region = loc.get('region')
if locality and region:
return "{}, {}".format(locality, region)
latitude = loc.get('latitude')
longitude = loc.get('longitude')
if latitude and longitude:
return "{:.2f}, {:.2f}".format(float(latitude), float(longitude))
return "Unknown Location"
@views.app_template_filter('isotime')
def isotime_filter(thedate):
if thedate:
thedate = thedate.replace(microsecond=0)
if hasattr(thedate, 'tzinfo') and not thedate.tzinfo:
tz = pytz.timezone(get_settings().timezone)
thedate = pytz.utc.localize(thedate).astimezone(tz)
if isinstance(thedate, datetime.datetime):
return thedate.isoformat('T')
return thedate.isoformat()
@views.app_template_filter('human_time')
def human_time(thedate, alternate=None):
if not thedate:
return alternate
if hasattr(thedate, 'tzinfo') and not thedate.tzinfo:
tz = pytz.timezone(get_settings().timezone)
thedate = pytz.utc.localize(thedate).astimezone(tz)
# limit full time to things that happen "today"
# and datetime.datetime.now(TIMEZONE) - thedate < datetime.timedelta(days=1)):
    if isinstance(thedate, datetime.datetime):
return thedate.strftime('%B %-d, %Y %-I:%M%P %Z')
return thedate.strftime('%B %-d, %Y')
@views.app_template_filter('datetime_range')
def datetime_range(rng):
start, end = rng
if not start or not end:
return '???'
fmt1 = '%Y %B %-d, %-I:%M%P'
if start.date() == end.date():
fmt2 = '%-I:%M%P %Z'
else:
fmt2 = '%Y %B %-d, %-I:%M%P %Z'
return (
'<time class="dt-start" datetime="{}">{}</time>'
' — <time class="dt-end" datetime="{}">{}</time>'
).format(
isotime_filter(start),
start.strftime(fmt1),
isotime_filter(end),
end.strftime(fmt2)
)
@views.app_template_filter('date')
def date_filter(thedate, first_only=False):
if thedate:
if hasattr(thedate, 'tzinfo') and not thedate.tzinfo:
tz = pytz.timezone(get_settings().timezone)
thedate = pytz.utc.localize(thedate).astimezone(tz)
formatted = thedate.strftime('%B %-d, %Y')
if first_only:
previous = getattr(g, 'previous date', None)
setattr(g, 'previous date', formatted)
if previous == formatted:
return None
return formatted
@views.app_template_filter('time')
def time_filter(thedate):
if thedate:
if hasattr(thedate, 'tzinfo') and not thedate.tzinfo:
tz = pytz.timezone(get_settings().timezone)
thedate = pytz.utc.localize(thedate).astimezone(tz)
if isinstance(thedate, datetime.datetime):
return thedate.strftime('%-I:%M%P %Z')
@views.app_template_filter('pluralize')
def pluralize(number, singular='', plural='s'):
if number == 1:
return singular
else:
return plural
@views.app_template_filter('month_shortname')
def month_shortname(month):
return datetime.date(1990, month, 1).strftime('%b')
@views.app_template_filter('month_name')
def month_name(month):
return datetime.date(1990, month, 1).strftime('%B')
@views.app_template_filter('atom_sanitize')
def atom_sanitize(content):
return Markup.escape(str(content))
@views.app_template_filter('prettify_url')
def prettify_url(*args, **kwargs):
return util.prettify_url(*args, **kwargs)
@views.app_template_filter('domain_from_url')
def domain_from_url(url):
if not url:
return url
return urllib.parse.urlparse(url).netloc
@views.app_template_filter('make_absolute')
def make_absolute(url):
if not url:
return url
return urllib.parse.urljoin(get_settings().site_url, url)
@views.app_template_filter('format_syndication_url')
def format_syndication_url(url, include_rel=True):
fmt = '<a class="u-syndication" '
if include_rel:
fmt += 'rel="syndication" '
fmt += 'href="{}">{} {}</a>'
return Markup(fmt.format(
url, syndication_icon(url), syndication_text(url)))
@views.app_template_filter('syndication_icon')
def syndication_icon(url):
fmt = '<i class="fa {}"></i>'
if util.TWITTER_RE.match(url):
return Markup(fmt.format('fa-twitter'))
if util.FACEBOOK_RE.match(url) or util.FACEBOOK_EVENT_RE.match(url):
return Markup(fmt.format('fa-facebook'))
if util.INSTAGRAM_RE.match(url):
return Markup(fmt.format('fa-instagram'))
if util.FLICKR_RE.match(url):
return Markup(fmt.format('fa-flickr'))
if util.INDIENEWS_RE.match(url):
return Markup(fmt.format('fa-newspaper-o'))
return Markup(fmt.format('fa-paper-plane'))
@views.app_template_filter('syndication_text')
def syndication_text(url):
if util.TWITTER_RE.match(url):
return 'Twitter'
if util.FACEBOOK_RE.match(url) or util.FACEBOOK_EVENT_RE.match(url):
return 'Facebook'
if util.INSTAGRAM_RE.match(url):
return 'Instagram'
return domain_from_url(url)
IMAGE_TAG_RE = re.compile(r'<img([^>]*) src="(https?://[^">]+)"')
@views.app_template_filter('proxy_all')
def proxy_all_filter(html, side=None):
def repl(m):
url = m.group(2)
# don't proxy images that come from this site
if url.startswith(get_settings().site_url):
return m.group(0)
url = url.replace('&', '&')
return '<img{} src="{}"'.format(
m.group(1), imageproxy.imageproxy_filter(url, side))
return IMAGE_TAG_RE.sub(repl, html) if html else html
@views.app_template_filter()
def add_preview(content):
"""If a post ends with the URL of a known media source (youtube,
instagram, etc.), add the content inline.
"""
if any('<' + tag in content for tag in (
'img', 'iframe', 'embed', 'audio', 'video')):
# don't add a preview to a post that already has one
return content
    instagram_regex = r'https?://instagram\.com/p/[\w\-]+/?'
    vimeo_regex = r'https?://vimeo\.com/(\d+)/?'
    youtube_regex = r'https?://(?:(?:www\.)?youtube\.com/watch\?v=|youtu\.be/)([\w\-]+)'
    img_regex = r'https?://[^\s">]*\.(?:gif|png|jpg)'
m = re.search(instagram_regex, content)
if m:
ig_url = m.group(0)
media_url = urllib.parse.urljoin(ig_url, 'media/?size=l')
return '{}<a href="{}"><img src="{}" /></a>'.format(
content, ig_url, media_url)
m = re.search(vimeo_regex, content)
if m:
# vimeo_url = m.group(0)
vimeo_id = m.group(1)
return (
'{}<iframe src="//player.vimeo.com/video/{}" width="560" '
'height="315" frameborder="0" webkitallowfullscreen '
'mozallowfullscreen allowfullscreen></iframe>'
).format(content, vimeo_id)
m = re.search(youtube_regex, content)
if m:
youtube_id = m.group(1)
return (
'{}<iframe width="560" height="315" '
'src="https://www.youtube.com/embed/{}" frameborder="0" '
'allowfullscreen></iframe>'
).format(content, youtube_id)
m = re.search(img_regex, content)
if m:
return '{}<img src="{}"/>'.format(content, m.group(0))
return content
|
Lancey6/redwind
|
redwind/views.py
|
Python
|
bsd-2-clause
| 21,930
|
layer = iface.activeLayer()
renderer = layer.rendererV2()
#props = layer.rendererV2().symbols()[0].symbolLayer(0).properties()
props = {
"outline_width": "0.3",
"outline_color": "0,0,0,255",
"angle": "0",
"width": "8",
"color": "0,0,0,255",
"svgFile_dd_useexpr": "1",
"svgFile": "/home/jelen/jelen_dta/projekty/qgis_islh/plugin/symbology/10104.svg",
"svgFile_dd_expression": "'/home/jelen/.qgis2/svg/prsi/'+\"PSK_ZNACKA\"+'.svg'",
"outline_width_unit": "MM",
"svg_outline_width_map_unit_scale": "0,0",
"svgFile_dd_field": "",
"pattern_width_unit": "MM",
"svgFile_dd_active": "1",
"outline_width_map_unit_scale": "0,0",
"svg_outline_width_unit": "MM",
"pattern_width_map_unit_scale": "0,0"
}
# appendSymbolLayer() returns a bool, so build the symbol first and then
# hand the symbol itself to the renderer.
symbol = QgsFillSymbolV2()
symbol.appendSymbolLayer(QgsSVGFillSymbolLayer.create(props))
layer.rendererV2().setSymbol(symbol)
|
jeleniste/islh_parser
|
svg/por_mapa/symbology.py
|
Python
|
gpl-3.0
| 845
|
"""
k-epsilon fitted turbulence model using truncation of the velocity and water level
Implements TurbulenceKepFitted_core. See parent class for explanation
Runs its parent's methods.
Date: 02-11-2016 (original date: 20-11-2015)
Authors: Y.M. Dijkstra
"""
from TurbulenceKepFitted_core import TurbulenceKepFitted_core
class KEFittedTruncated(TurbulenceKepFitted_core):
# Variables
order = None # order = None is used to indicate truncation
# Methods
def __init__(self, input):
TurbulenceKepFitted_core.__init__(self, input)
self.input = input
return
def run_init(self):
self.logger.info('Running $k-\epsilon$ fitted turbulence model - init')
self.truncationorder = self.input.v('truncationOrder')
Av, roughness, BottomBC, R = self.main(self.order, init=True)
# load to dictionary
d = {}
d['Roughness'] = roughness
d['Av'] = Av
d['BottomBC'] = BottomBC
if self.input.v('referenceLevel')=='True':
d['R'] = R
return d
def run(self):
self.logger.info('Running $k-\epsilon$ fitted turbulence model')
Av, roughness, _, R = self.main(self.order)
# load to dictionary
d = {}
d['Roughness'] = roughness
d['Av'] = Av
if self.input.v('referenceLevel')=='True':
d['R'] = R
return d
|
YoeriDijkstra/iFlow
|
packages/numerical2DV/turbulence/KEFittedTruncated.py
|
Python
|
lgpl-3.0
| 1,404
|
#####################################################################
# program name: guess.py
# author: max baseman
# email: dos.fool@gmail.com
# date: 5/01/07
# short description:
#   a program with two guessing modes:
#   one picks random numbers until it hits your number,
#   the other bisects the range until it finds it
#####################################################################
from random import randrange as random
print "welcome to a number guessing program"
print
print "enter 1 for random "
print "or"
print "enter 2 for efficient"
print
guesstype=input(" >")
if guesstype == 1:
number=input("pick a number >")
numrange=input("pick a range >")+1
    guessed=[]
    guess=random(numrange)
    guessed.append(guess)
    print guess
    guesses=1
    while guess!=number:
        guesses=guesses+1
        guess=random(numrange)
        while guess in guessed:
            guess=random(numrange)
        guessed.append(guess)
        print guess
    print "i got the number",number,"in",guesses,"guesses, out of a range of",numrange-1
elif guesstype == 2:
number=input("pick a number >")
numrange=input("pick a range >")+1
guess=numrange/2
print guess
guesses=1
min=0
max=numrange
while guess!=number:
if guess < number:
min=guess
else:
max=guess
guess = (min+max) /2
guesses= guesses +1
print guess
print "i got the number",number,"in",guesses,"guesses, out of a range of",numrange-1
print
|
ActiveState/code
|
recipes/Python/511440_guesspy/recipe-511440.py
|
Python
|
mit
| 1,690
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-03-14 19:45
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('pootle_store', '0045_remove_suggestion_tmp_state'),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('pootle_score', '0002_set_user_scores'),
]
operations = [
migrations.CreateModel(
name='UserStoreScore',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('date', models.DateField(db_index=True)),
('score', models.FloatField(db_index=True)),
('reviewed', models.IntegerField(db_index=True, default=0)),
('suggested', models.IntegerField(db_index=True, default=0)),
('translated', models.IntegerField(db_index=True, default=0)),
('store', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='user_scores', to='pootle_store.Store')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='store_scores', to=settings.AUTH_USER_MODEL)),
],
options={
'abstract': False,
'db_table': 'pootle_user_store_score',
},
),
migrations.AlterUniqueTogether(
name='userstorescore',
unique_together=set([('date', 'store', 'user')]),
),
]
|
claudep/pootle
|
pootle/apps/pootle_score/migrations/0003_add_pootle_user_store_score.py
|
Python
|
gpl-3.0
| 1,645
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Imports unittest as a replacement for testing.pybase.googletest."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import inspect
import itertools
import os
import sys
import tempfile
# go/tf-wildcard-import
# pylint: disable=wildcard-import
from unittest import *
# pylint: enable=wildcard-import
from tensorflow.python.platform import benchmark # pylint: disable=unused-import
Benchmark = benchmark.TensorFlowBenchmark # pylint: disable=invalid-name
unittest_main = main
# pylint: disable=invalid-name
# pylint: disable=undefined-variable
def g_main(*args, **kwargs):
"""Delegate to unittest.main after redefining testLoader."""
if 'TEST_SHARD_STATUS_FILE' in os.environ:
try:
f = None
try:
f = open(os.environ['TEST_SHARD_STATUS_FILE'], 'w')
f.write('')
except IOError:
sys.stderr.write('Error opening TEST_SHARD_STATUS_FILE (%s). Exiting.'
% os.environ['TEST_SHARD_STATUS_FILE'])
sys.exit(1)
finally:
if f is not None: f.close()
if ('TEST_TOTAL_SHARDS' not in os.environ or
'TEST_SHARD_INDEX' not in os.environ):
return unittest_main(*args, **kwargs)
total_shards = int(os.environ['TEST_TOTAL_SHARDS'])
shard_index = int(os.environ['TEST_SHARD_INDEX'])
base_loader = TestLoader()
delegate_get_names = base_loader.getTestCaseNames
bucket_iterator = itertools.cycle(range(total_shards))
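  # Deal sorted test case names round-robin into total_shards buckets and
  # keep only the names that land in this shard's bucket.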
def getShardedTestCaseNames(testCaseClass):
filtered_names = []
for testcase in sorted(delegate_get_names(testCaseClass)):
bucket = next(bucket_iterator)
if bucket == shard_index:
filtered_names.append(testcase)
return filtered_names
# Override getTestCaseNames
base_loader.getTestCaseNames = getShardedTestCaseNames
kwargs['testLoader'] = base_loader
unittest_main(*args, **kwargs)
# Redefine main to allow running benchmarks
def main(): # pylint: disable=function-redefined
benchmark.benchmarks_main(true_main=g_main)
def GetTempDir():
first_frame = inspect.stack()[-1][0]
temp_dir = os.path.join(
tempfile.gettempdir(), os.path.basename(inspect.getfile(first_frame)))
  # Avoid str.rstrip('.py'), which strips any trailing '.', 'p' or 'y'
  # characters; drop the extension explicitly instead.
  if temp_dir.endswith('.py'):
    temp_dir = temp_dir[:-3]
if not os.path.isdir(temp_dir):
os.mkdir(temp_dir, 0o755)
return temp_dir
def test_src_dir_path(relative_path):
"""Creates an absolute test srcdir path given a relative path.
Args:
relative_path: a path relative to tensorflow root.
e.g. "contrib/session_bundle/example".
Returns:
An absolute path to the linked in runfiles.
"""
return os.path.join(os.environ['TEST_SRCDIR'],
"org_tensorflow/tensorflow", relative_path)
def StatefulSessionAvailable():
return False
class StubOutForTesting(object):
"""Support class for stubbing methods out for unit testing.
Sample Usage:
You want os.path.exists() to always return true during testing.
stubs = StubOutForTesting()
stubs.Set(os.path, 'exists', lambda x: 1)
...
stubs.CleanUp()
The above changes os.path.exists into a lambda that returns 1. Once
the ... part of the code finishes, the CleanUp() looks up the old
value of os.path.exists and restores it.
"""
def __init__(self):
self.cache = []
self.stubs = []
def __del__(self):
"""Do not rely on the destructor to undo your stubs.
You cannot guarantee exactly when the destructor will get called without
relying on implementation details of a Python VM that may change.
"""
self.CleanUp()
# __enter__ and __exit__ allow use as a context manager.
def __enter__(self):
return self
def __exit__(self, unused_exc_type, unused_exc_value, unused_tb):
self.CleanUp()
def CleanUp(self):
"""Undoes all SmartSet() & Set() calls, restoring original definitions."""
self.SmartUnsetAll()
self.UnsetAll()
def SmartSet(self, obj, attr_name, new_attr):
"""Replace obj.attr_name with new_attr.
This method is smart and works at the module, class, and instance level
while preserving proper inheritance. It will not stub out C types however
unless that has been explicitly allowed by the type.
This method supports the case where attr_name is a staticmethod or a
classmethod of obj.
Notes:
- If obj is an instance, then it is its class that will actually be
stubbed. Note that the method Set() does not do that: if obj is
an instance, it (and not its class) will be stubbed.
- The stubbing is using the builtin getattr and setattr. So, the __get__
and __set__ will be called when stubbing (TODO: A better idea would
probably be to manipulate obj.__dict__ instead of getattr() and
setattr()).
Args:
obj: The object whose attributes we want to modify.
attr_name: The name of the attribute to modify.
new_attr: The new value for the attribute.
Raises:
AttributeError: If the attribute cannot be found.
"""
if (inspect.ismodule(obj) or
(not inspect.isclass(obj) and attr_name in obj.__dict__)):
orig_obj = obj
orig_attr = getattr(obj, attr_name)
else:
if not inspect.isclass(obj):
mro = list(inspect.getmro(obj.__class__))
else:
mro = list(inspect.getmro(obj))
mro.reverse()
orig_attr = None
found_attr = False
for cls in mro:
try:
orig_obj = cls
orig_attr = getattr(obj, attr_name)
found_attr = True
except AttributeError:
continue
if not found_attr:
raise AttributeError('Attribute not found.')
# Calling getattr() on a staticmethod transforms it to a 'normal' function.
# We need to ensure that we put it back as a staticmethod.
old_attribute = obj.__dict__.get(attr_name)
if old_attribute is not None and isinstance(old_attribute, staticmethod):
orig_attr = staticmethod(orig_attr)
self.stubs.append((orig_obj, attr_name, orig_attr))
setattr(orig_obj, attr_name, new_attr)
def SmartUnsetAll(self):
"""Reverses SmartSet() calls, restoring things to original definitions.
This method is automatically called when the StubOutForTesting()
object is deleted; there is no need to call it explicitly.
It is okay to call SmartUnsetAll() repeatedly, as later calls have
no effect if no SmartSet() calls have been made.
"""
for args in reversed(self.stubs):
setattr(*args)
self.stubs = []
def Set(self, parent, child_name, new_child):
"""In parent, replace child_name's old definition with new_child.
The parent could be a module when the child is a function at
module scope. Or the parent could be a class when a class' method
is being replaced. The named child is set to new_child, while the
prior definition is saved away for later, when UnsetAll() is
called.
This method supports the case where child_name is a staticmethod or a
classmethod of parent.
Args:
parent: The context in which the attribute child_name is to be changed.
child_name: The name of the attribute to change.
new_child: The new value of the attribute.
"""
old_child = getattr(parent, child_name)
old_attribute = parent.__dict__.get(child_name)
if old_attribute is not None and isinstance(old_attribute, staticmethod):
old_child = staticmethod(old_child)
self.cache.append((parent, old_child, child_name))
setattr(parent, child_name, new_child)
def UnsetAll(self):
"""Reverses Set() calls, restoring things to their original definitions.
This method is automatically called when the StubOutForTesting()
object is deleted; there is no need to call it explicitly.
It is okay to call UnsetAll() repeatedly, as later calls have no
effect if no Set() calls have been made.
"""
# Undo calls to Set() in reverse order, in case Set() was called on the
# same arguments repeatedly (want the original call to be last one undone)
for (parent, old_child, child_name) in reversed(self.cache):
setattr(parent, child_name, old_child)
self.cache = []
|
cg31/tensorflow
|
tensorflow/python/platform/googletest.py
|
Python
|
apache-2.0
| 8,887
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2011 Sybren A. Stüvel <sybren@stuvel.eu>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''Functions for PKCS#1 version 1.5 encryption and signing
This module implements certain functionality from PKCS#1 version 1.5. For a
very clear example, read http://www.di-mgt.com.au/rsa_alg.html#pkcs1schemes
At least 8 bytes of random padding is used when encrypting a message. This makes
these methods much more secure than the ones in the ``rsa`` module.
WARNING: this module leaks information when decryption or verification fails.
The exceptions that are raised contain the Python traceback information, which
can be used to deduce where in the process the failure occurred. DO NOT PASS
SUCH INFORMATION to your users.
'''
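# Typical usage (illustrative; assumes an existing RSA key pair):
#     signature = sign(message, priv_key, 'SHA-256')
#     verify(message, signature, pub_key)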
import hashlib
import os
from rsa._compat import b
from rsa import common, transform, core, varblock
# ASN.1 codes that describe the hash algorithm used.
HASH_ASN1 = {
'MD5': b('\x30\x20\x30\x0c\x06\x08\x2a\x86\x48\x86\xf7\x0d\x02\x05\x05\x00\x04\x10'),
'SHA-1': b('\x30\x21\x30\x09\x06\x05\x2b\x0e\x03\x02\x1a\x05\x00\x04\x14'),
'SHA-256': b('\x30\x31\x30\x0d\x06\x09\x60\x86\x48\x01\x65\x03\x04\x02\x01\x05\x00\x04\x20'),
'SHA-384': b('\x30\x41\x30\x0d\x06\x09\x60\x86\x48\x01\x65\x03\x04\x02\x02\x05\x00\x04\x30'),
'SHA-512': b('\x30\x51\x30\x0d\x06\x09\x60\x86\x48\x01\x65\x03\x04\x02\x03\x05\x00\x04\x40'),
}
HASH_METHODS = {
'MD5': hashlib.md5,
'SHA-1': hashlib.sha1,
'SHA-256': hashlib.sha256,
'SHA-384': hashlib.sha384,
'SHA-512': hashlib.sha512,
}
class CryptoError(Exception):
'''Base class for all exceptions in this module.'''
class VerificationError(CryptoError):
'''Raised when verification fails.'''
def _pad_for_signing(message, target_length):
r'''Pads the message for signing, returning the padded message.
The padding is always a repetition of FF bytes.
:return: 00 01 PADDING 00 MESSAGE
>>> block = _pad_for_signing('hello', 16)
>>> len(block)
16
>>> block[0:2]
'\x00\x01'
>>> block[-6:]
'\x00hello'
>>> block[2:-6]
'\xff\xff\xff\xff\xff\xff\xff\xff'
'''
max_msglength = target_length - 11
msglength = len(message)
if msglength > max_msglength:
raise OverflowError('%i bytes needed for message, but there is only'
' space for %i' % (msglength, max_msglength))
padding_length = target_length - msglength - 3
return b('').join([b('\x00\x01'),
padding_length * b('\xff'),
b('\x00'),
message])
def sign(message, priv_key, hash):
'''Signs the message with the private key.
Hashes the message, then signs the hash with the given key. This is known
as a "detached signature", because the message itself isn't altered.
:param message: the message to sign. Can be an 8-bit string or a file-like
object. If ``message`` has a ``read()`` method, it is assumed to be a
file-like object.
:param priv_key: the :py:class:`rsa.PrivateKey` to sign with
:param hash: the hash method used on the message. Use 'MD5', 'SHA-1',
'SHA-256', 'SHA-384' or 'SHA-512'.
:return: a message signature block.
:raise OverflowError: if the private key is too small to contain the
requested hash.
'''
# Get the ASN1 code for this hash method
if hash not in HASH_ASN1:
raise ValueError('Invalid hash method: %s' % hash)
asn1code = HASH_ASN1[hash]
# Calculate the hash
hash = _hash(message, hash)
# Encrypt the hash with the private key
cleartext = asn1code + hash
keylength = common.byte_size(priv_key.n)
padded = _pad_for_signing(cleartext, keylength)
payload = transform.bytes2int(padded)
encrypted = core.encrypt_int(payload, priv_key.d, priv_key.n)
block = transform.int2bytes(encrypted, keylength)
return block
def verify(message, signature, pub_key):
'''Verifies that the signature matches the message.
The hash method is detected automatically from the signature.
:param message: the signed message. Can be an 8-bit string or a file-like
object. If ``message`` has a ``read()`` method, it is assumed to be a
file-like object.
:param signature: the signature block, as created with :py:func:`rsa.sign`.
:param pub_key: the :py:class:`rsa.PublicKey` of the person signing the message.
:raise VerificationError: when the signature doesn't match the message.
.. warning::
Never display the stack trace of a
:py:class:`rsa.pkcs1.VerificationError` exception. It shows where in
the code the exception occurred, and thus leaks information about the
key. It's only a tiny bit of information, but every bit makes cracking
the keys easier.
'''
blocksize = common.byte_size(pub_key.n)
encrypted = transform.bytes2int(signature)
decrypted = core.decrypt_int(encrypted, pub_key.e, pub_key.n)
clearsig = transform.int2bytes(decrypted, blocksize)
# If we can't find the signature marker, verification failed.
if clearsig[0:2] != b('\x00\x01'):
raise VerificationError('Verification failed')
# Find the 00 separator between the padding and the payload
try:
sep_idx = clearsig.index(b('\x00'), 2)
except ValueError:
raise VerificationError('Verification failed')
# Get the hash and the hash method
(method_name, signature_hash) = _find_method_hash(clearsig[sep_idx+1:])
message_hash = _hash(message, method_name)
# Compare the real hash to the hash in the signature
if message_hash != signature_hash:
raise VerificationError('Verification failed')
return True
def _hash(message, method_name):
'''Returns the message digest.
:param message: the signed message. Can be an 8-bit string or a file-like
object. If ``message`` has a ``read()`` method, it is assumed to be a
file-like object.
:param method_name: the hash method, must be a key of
:py:const:`HASH_METHODS`.
'''
if method_name not in HASH_METHODS:
raise ValueError('Invalid hash method: %s' % method_name)
method = HASH_METHODS[method_name]
hasher = method()
if hasattr(message, 'read') and hasattr(message.read, '__call__'):
# read as 1K blocks
for block in varblock.yield_fixedblocks(message, 1024):
hasher.update(block)
else:
# hash the message object itself.
hasher.update(message)
return hasher.digest()
def _find_method_hash(method_hash):
'''Finds the hash method and the hash itself.
:param method_hash: ASN1 code for the hash method concatenated with the
hash itself.
:return: tuple (method, hash) where ``method`` is the used hash method, and
``hash`` is the hash itself.
    :raise VerificationError: when the hash method cannot be found
'''
for (hashname, asn1code) in HASH_ASN1.items():
if not method_hash.startswith(asn1code):
continue
return (hashname, method_hash[len(asn1code):])
raise VerificationError('Verification failed')
__all__ = ['sign', 'verify',
'VerificationError', 'CryptoError']
if __name__ == '__main__':
print('Running doctests 1000x or until failure')
import doctest
for count in range(1000):
(failures, tests) = doctest.testmod()
if failures:
break
if count and count % 100 == 0:
print('%i times' % count)
print('Doctests done')
|
Acimaz/Google_Apple_Financial_Reporter
|
lib/third_party/rsa/pkcs1.py
|
Python
|
mit
| 8,244
|
# This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
import re
from datetime import datetime
from buildbot.changes.bitbucket import BitbucketPullrequestPoller
from buildbot.test.util import changesource
from twisted.internet import defer
from twisted.internet import reactor
from twisted.trial import unittest
from twisted.web import client
from twisted.web.error import Error
class SourceRest():
template = """\
{
"hash": "%(hash)s",
"links": {
"html": {
"href": "https://bitbucket.org/%(owner)s/%(slug)s/commits/%(hash)s"
}
},
"repository": {
"links": {
"self": {
"href": "https://bitbucket.org/!api/2.0/repositories/%(owner)s/%(slug)s"
}
}
},
"date": "%(date)s"
}
"""
repo_template = """\
{
"links": {
"html": {
"href": "https://bitbucket.org/%(owner)s/%(slug)s"
}
}
}
"""
def __init__(self, owner, slug, hash, date):
self.owner = owner
self.slug = slug
self.hash = hash
self.date = date
def request(self):
return self.template % {
"owner": self.owner,
"slug": self.slug,
"hash": self.hash,
"date": self.date,
}
def repo_request(self):
return self.repo_template % {
"owner": self.owner,
"slug": self.slug,
}
class PullRequestRest():
template = """\
{
"description": "%(description)s",
"title": "%(title)s",
"source": {
"commit": {
"hash": "%(hash)s",
"links": {
"self": {
"href": "https://bitbucket.org/!api/2.0/repositories/%(owner)s/%(slug)s/commit/%(hash)s"
}
}
}
},
"state": "OPEN",
"author": {
"display_name": "%(display_name)s"
},
"created_on": "%(created_on)s",
"participants": [
],
"updated_on": "%(updated_on)s",
"merge_commit": null,
"id": %(id)d
}
"""
def __init__(self, nr, title, description, display_name, source, created_on, updated_on=None):
self.nr = nr
self.title = title
self.description = description
self.display_name = display_name
self.source = source
self.created_on = created_on
if updated_on:
self.updated_on = updated_on
else:
self.updated_on = self.created_on
def request(self):
return self.template % {
"description": self.description,
"title": self.title,
"hash": self.source.hash,
"owner": self.source.owner,
"slug": self.source.slug,
"display_name": self.display_name,
"created_on": self.created_on,
"updated_on": self.updated_on,
"id": self.nr,
}
class PullRequestListRest():
template = """\
{
"description": "%(description)s",
"links": {
"self": {
"href": "https://bitbucket.org/!api/2.0/repositories/%(owner)s/%(slug)s/pullrequests/%(id)d"
},
"html": {
"href": "https://bitbucket.org/%(owner)s/%(slug)s/pull-request/%(id)d"
}
},
"author": {
"display_name": "%(display_name)s"
},
"title": "%(title)s",
"source": {
"commit": {
"hash": "%(hash)s",
"links": {
"self": {
"href": "https://bitbucket.org/!api/2.0/repositories/%(src_owner)s/%(src_slug)s/commit/%(hash)s"
}
}
},
"repository": {
"links": {
"self": {
"href": "https://bitbucket.org/!api/2.0/repositories/%(src_owner)s/%(src_slug)s"
}
}
},
"branch": {
"name": "default"
}
},
"state": "OPEN",
"created_on": "%(created_on)s",
"updated_on": "%(updated_on)s",
"merge_commit": null,
"id": %(id)s
}
"""
def __init__(self, owner, slug, prs):
self.owner = owner
self.slug = slug
self.prs = prs
self.pr_by_id = {}
self.src_by_url = {}
for pr in prs:
self.pr_by_id[pr.nr] = pr
self.src_by_url["%s/%s"
% (pr.source.owner, pr.source.slug)] = pr.source
def request(self):
s = ""
for pr in self.prs:
s += self.template % {
"description": pr.description,
"owner": self.owner,
"slug": self.slug,
"display_name": pr.display_name,
"title": pr.title,
"hash": pr.source.hash,
"src_owner": pr.source.owner,
"src_slug": pr.source.slug,
"created_on": pr.created_on,
"updated_on": pr.updated_on,
"id": pr.nr,
}
return """\
{
"pagelen": 10,
"values": [%s
],
"page": 1
}
""" % s
def getPage(self, url, timeout=None):
list_url_re = re.compile(
r"https://bitbucket.org/api/2.0/repositories/%s/%s/pullrequests"
% (self.owner, self.slug))
pr_url_re = re.compile(
r"https://bitbucket.org/!api/2.0/repositories/%s/%s/pullrequests/(?P<id>\d+)"
% (self.owner, self.slug))
source_commit_url_re = re.compile(
r"https://bitbucket.org/!api/2.0/repositories/(?P<src_owner>.*)/(?P<src_slug>.*)/commit/(?P<hash>\d+)")
source_url_re = re.compile(
r"https://bitbucket.org/!api/2.0/repositories/(?P<src_owner>.*)/(?P<src_slug>.*)")
if list_url_re.match(url):
return defer.succeed(self.request())
m = pr_url_re.match(url)
if m:
return self.pr_by_id[int(m.group("id"))].request()
m = source_commit_url_re.match(url)
if m:
return self.src_by_url["%s/%s"
% (m.group("src_owner"), m.group("src_slug"))].request()
m = source_url_re.match(url)
if m:
return self.src_by_url["%s/%s"
% (m.group("src_owner"), m.group("src_slug"))].repo_request()
raise Error(code=404)
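# Example (illustrative): with owner="owner" and slug="slug", the poller's
# list request for
#   https://bitbucket.org/api/2.0/repositories/owner/slug/pullrequests
# matches list_url_re above and receives the rendered pull-request list JSON;
# per-PR and per-commit URLs are dispatched by the other regexes.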
class TestBitbucketPullrequestPoller(changesource.ChangeSourceMixin, unittest.TestCase):
def setUp(self):
# create pull requests
self.date = "2013-10-15T20:38:20.001797+00:00"
self.date_epoch = datetime.strptime(self.date.split('.')[0],
'%Y-%m-%dT%H:%M:%S')
src = SourceRest(
owner="contributor",
slug="slug",
hash="000000000000000000000000000001",
date=self.date,
)
pr = PullRequestRest(
nr=1,
title="title",
description="description",
display_name="contributor",
source=src,
created_on=self.date,
)
self.pr_list = PullRequestListRest(
owner="owner",
slug="slug",
prs=[pr],
)
# update
src = SourceRest(
owner="contributor",
slug="slug",
hash="000000000000000000000000000002",
date=self.date,
)
pr = PullRequestRest(
nr=1,
title="title",
description="description",
display_name="contributor",
source=src,
created_on=self.date,
)
self.pr_list2 = PullRequestListRest(
owner="owner",
slug="slug",
prs=[pr],
)
d = self.setUpChangeSource()
def create_poller(_):
self.attachChangeSource(BitbucketPullrequestPoller(
owner='owner',
slug='slug',
))
d.addCallback(create_poller)
return d
def tearDown(self):
return self.tearDownChangeSource()
def _fakeGetPage(self, result):
        # Install a fake getPage that records the requested URL in
        # self.getPage_got_url and returns the given result.
self.getPage_got_url = None
def fake(url, timeout=None):
self.getPage_got_url = url
return defer.succeed(result)
self.patch(client, "getPage", fake)
def _fakeGetPage404(self):
def fail(url, timeout=None):
raise Error(code=404)
self.patch(client, "getPage", fail)
# tests
def test_describe(self):
assert re.search(r'owner/slug', self.changesource.describe())
def test_poll_unknown_repo(self):
# Polling a non-existent repository should result in a 404
self._fakeGetPage404()
d = self.changesource.poll()
def check(_):
self.fail(
'Polling a non-existent repository should result in a 404.')
def err(e):
self.assertEqual(e.getErrorMessage(), '404 Not Found')
d.addCallback(check)
d.addErrback(err)
return d
def test_poll_no_pull_requests(self):
rest = PullRequestListRest(owner="owner", slug="slug", prs=[])
self._fakeGetPage(rest.request())
d = self.changesource.poll()
def check(_):
self.assertEqual(len(self.master.data.updates.changesAdded), 0)
d.addCallback(check)
return d
def test_poll_new_pull_requests(self):
# patch client.getPage()
self.patch(client, "getPage", self.pr_list.getPage)
d = self.changesource.poll()
def check(_):
self.assertEqual(self.master.data.updates.changesAdded, [{
'author': u'contributor',
'branch': None,
'category': None,
'codebase': None,
'comments': u'pull-request #1: title\nhttps://bitbucket.org/owner/slug/pull-request/1',
'files': None,
'project': u'',
'properties': {},
'repository': u'https://bitbucket.org/contributor/slug',
'revision': u'000000000000000000000000000001',
'revlink': u'https://bitbucket.org/contributor/slug/commits/000000000000000000000000000001',
'src': u'bitbucket',
'when_timestamp': 1381869500,
}])
d.addCallback(check)
return d
def test_poll_no_updated_pull_request(self):
# patch client.getPage()
self.patch(client, "getPage", self.pr_list.getPage)
d = self.changesource.poll()
def check(_):
self.assertEqual(self.master.data.updates.changesAdded, [{
'author': u'contributor',
'branch': None,
'category': None,
'codebase': None,
'comments': u'pull-request #1: title\nhttps://bitbucket.org/owner/slug/pull-request/1',
'files': None,
'project': u'',
'properties': {},
'repository': u'https://bitbucket.org/contributor/slug',
'revision': u'000000000000000000000000000001',
'revlink': u'https://bitbucket.org/contributor/slug/commits/000000000000000000000000000001',
'src': u'bitbucket',
'when_timestamp': 1381869500,
}])
# repoll
d = self.changesource.poll()
def check2(_):
self.assertEqual(len(self.master.data.updates.changesAdded), 1)
d.addCallback(check2)
d.addCallback(check)
return d
def test_poll_updated_pull_request(self):
# patch client.getPage()
self.patch(client, "getPage", self.pr_list.getPage)
d = self.changesource.poll()
def check(_):
self.assertEqual(self.master.data.updates.changesAdded, [{
'author': u'contributor',
'branch': None,
'category': None,
'codebase': None,
'comments': u'pull-request #1: title\nhttps://bitbucket.org/owner/slug/pull-request/1',
'files': None,
'project': u'',
'properties': {},
'repository': u'https://bitbucket.org/contributor/slug',
'revision': u'000000000000000000000000000001',
'revlink': u'https://bitbucket.org/contributor/slug/commits/000000000000000000000000000001',
'src': u'bitbucket',
'when_timestamp': 1381869500,
}])
self.patch(client, "getPage", self.pr_list2.getPage)
d = self.changesource.poll()
def check2(_):
self.assertEqual(self.master.data.updates.changesAdded, [
{
'author': u'contributor',
'branch': None,
'category': None,
'codebase': None,
'comments': u'pull-request #1: title\nhttps://bitbucket.org/owner/slug/pull-request/1',
'files': None,
'project': u'',
'properties': {},
'repository': u'https://bitbucket.org/contributor/slug',
'revision': u'000000000000000000000000000001',
'revlink': u'https://bitbucket.org/contributor/slug/commits/000000000000000000000000000001',
'src': u'bitbucket',
'when_timestamp': 1381869500,
},
{
'author': u'contributor',
'branch': None,
'category': None,
'codebase': None,
'comments': u'pull-request #1: title\nhttps://bitbucket.org/owner/slug/pull-request/1',
'files': None,
'project': u'',
'properties': {},
'repository': u'https://bitbucket.org/contributor/slug',
'revision': u'000000000000000000000000000002',
'revlink': u'https://bitbucket.org/contributor/slug/commits/000000000000000000000000000002',
'src': u'bitbucket',
'when_timestamp': 1381869500,
}
])
d.addCallback(check2)
return d
d.addCallback(check)
return d
def test_poll_pull_request_filter_False(self):
self.attachChangeSource(BitbucketPullrequestPoller(
owner='owner',
slug='slug',
pullrequest_filter=lambda x: False
))
# patch client.getPage()
self.patch(client, "getPage", self.pr_list.getPage)
d = self.changesource.poll()
def check(_):
self.assertEqual(len(self.master.data.updates.changesAdded), 0)
d.addCallback(check)
return d
def test_poll_pull_request_filter_True(self):
self.attachChangeSource(BitbucketPullrequestPoller(
owner='owner',
slug='slug',
pullrequest_filter=lambda x: True
))
# patch client.getPage()
self.patch(client, "getPage", self.pr_list.getPage)
d = self.changesource.poll()
def check(_):
self.assertEqual(self.master.data.updates.changesAdded, [{
'author': u'contributor',
'branch': None,
'category': None,
'codebase': None,
'comments': u'pull-request #1: title\nhttps://bitbucket.org/owner/slug/pull-request/1',
'files': None,
'project': u'',
'properties': {},
'repository': u'https://bitbucket.org/contributor/slug',
'revision': u'000000000000000000000000000001',
'revlink': u'https://bitbucket.org/contributor/slug/commits/000000000000000000000000000001',
'src': u'bitbucket',
'when_timestamp': 1381869500,
}])
d.addCallback(check)
return d
def test_poll_pull_request_not_useTimestamps(self):
self.attachChangeSource(BitbucketPullrequestPoller(
owner='owner',
slug='slug',
useTimestamps=False,
))
# patch client.getPage()
self.patch(client, "getPage", self.pr_list.getPage)
self.patch(reactor, "seconds", lambda: 1396825656)
d = self.changesource.poll()
def check(_):
self.assertEqual(self.master.data.updates.changesAdded, [{
'author': u'contributor',
'branch': None,
'category': None,
'codebase': None,
'comments': u'pull-request #1: title\nhttps://bitbucket.org/owner/slug/pull-request/1',
'files': None,
'project': u'',
'properties': {},
'repository': u'https://bitbucket.org/contributor/slug',
'revision': u'000000000000000000000000000001',
'revlink': u'https://bitbucket.org/contributor/slug/commits/000000000000000000000000000001',
'src': u'bitbucket',
'when_timestamp': 1396825656,
}])
d.addCallback(check)
return d
|
zozo123/buildbot
|
master/buildbot/test/unit/test_changes_bitbucket.py
|
Python
|
gpl-3.0
| 18,476
|
# This program will calculate the weight of three different atoms: Hydrogen, Carbon, and Oxygen
# You will input the number of atoms per molecule
def main():
h = 1.0079 #hydrogen weight
c = 12.011 # carbon weight
o = 15.9994 #Oxygen weight
    # Prompts for the number of atoms of each element in the molecule
    # (int(...) replaces eval(...), which would execute arbitrary user input)
    noH = int(input("Enter the number of Hydrogen atoms: "))
    noC = int(input("Enter the number of Carbon atoms: "))
    noO = int(input("Enter the number of Oxygen atoms: "))
#Calculate the total weight of each element
totalH = h * noH
totalC = c * noC
totalO = o * noO
# Display the weight of each element
print("The total weight of Hydrogen is: ", round(totalH, 5))
print("The total weight of Carbon is: ", round(totalC, 5))
print("The total weight of Oxygen is: ", round(totalO, 5))
    # Display the total molecular weight
    print("The total molecular weight is: ", round(totalO + totalC + totalH, 4))
main()
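# Example (illustrative): for water (H2O), enter 2 Hydrogen, 0 Carbon and
# 1 Oxygen atoms; the molecular weight is 2 * 1.0079 + 15.9994 = 18.0152.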
|
src053/PythonComputerScience
|
chap3/molWeight.py
|
Python
|
cc0-1.0
| 923
|
class PingSampler(object):
def __init__(self, *args, numSamples=5):
if numSamples <= 0:
raise ValueError('numSamples must be a positive integer')
self.numSamples = int(numSamples)
self.samples = list(args)
def __len__(self):
return len(self.samples)
def __iadd__(self, value):
self.samples = (self.samples + [value])[-self.numSamples:]
return self
@property
def average(self):
try:
return sum(self.samples) / len(self.samples)
except ZeroDivisionError:
return None
def __repr__(self):
return '<PingSampler num={}>'.format(self.numSamples)
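# Example (illustrative):
#   sampler = PingSampler(numSamples=3)
#   sampler += 40; sampler += 60
#   sampler.average                  # -> 50.0
#   sampler += 20; sampler += 80     # the oldest sample (40) is dropped
#   sampler.average                  # -> (60 + 20 + 80) / 3 == 53.33...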
|
HendrikF/transmitter
|
transmitter/PingSampler.py
|
Python
|
bsd-3-clause
| 691
|
# coding: utf-8
# Copyright: (c) 2019, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import os.path
import pathlib
import yaml
from jinja2 import Environment, FileSystemLoader
from ansible.module_utils._text import to_bytes
# Pylint doesn't understand Python3 namespace modules.
from ..change_detection import update_file_if_different # pylint: disable=relative-beyond-top-level
from ..commands import Command # pylint: disable=relative-beyond-top-level
from ..jinja2.filters import documented_type, html_ify # pylint: disable=relative-beyond-top-level
DEFAULT_TEMPLATE_FILE = 'collections_galaxy_meta.rst.j2'
DEFAULT_TEMPLATE_DIR = pathlib.Path(__file__).parents[4] / 'docs/templates'
class DocumentCollectionMeta(Command):
name = 'collection-meta'
@classmethod
def init_parser(cls, add_parser):
parser = add_parser(cls.name, description='Generate collection galaxy.yml documentation from shared metadata')
parser.add_argument("-t", "--template-file", action="store", dest="template_file",
default=DEFAULT_TEMPLATE_FILE,
help="Jinja2 template to use for the config")
parser.add_argument("-T", "--template-dir", action="store", dest="template_dir",
default=DEFAULT_TEMPLATE_DIR,
help="directory containing Jinja2 templates")
parser.add_argument("-o", "--output-dir", action="store", dest="output_dir", default='/tmp/',
help="Output directory for rst files")
parser.add_argument("collection_defs", metavar="COLLECTION-OPTION-DEFINITIONS.yml", type=str,
help="Source for collection metadata option docs")
@staticmethod
def main(args):
output_dir = os.path.abspath(args.output_dir)
template_file_full_path = os.path.abspath(os.path.join(args.template_dir, args.template_file))
template_file = os.path.basename(template_file_full_path)
template_dir = os.path.dirname(template_file_full_path)
with open(args.collection_defs) as f:
options = yaml.safe_load(f)
env = Environment(loader=FileSystemLoader(template_dir),
variable_start_string="@{",
variable_end_string="}@",
trim_blocks=True)
env.filters['documented_type'] = documented_type
env.filters['html_ify'] = html_ify
template = env.get_template(template_file)
output_name = os.path.join(output_dir, template_file.replace('.j2', ''))
temp_vars = {'options': options}
data = to_bytes(template.render(temp_vars))
update_file_if_different(output_name, data)
return 0
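# Example (illustrative; the exact entry point depends on how build_ansible
# wires up its subcommands):
#   ... collection-meta -o /tmp/rst/ collections_galaxy_meta.yml
# reads the YAML option definitions and renders them through
# collections_galaxy_meta.rst.j2 into /tmp/rst/collections_galaxy_meta.rst.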
|
cchurch/ansible
|
hacking/build_library/build_ansible/command_plugins/collection_meta.py
|
Python
|
gpl-3.0
| 2,953
|
# -*- coding: utf-8 -*-
from textwrap import dedent
from django import VERSION
import pytest
from djmoney.models.fields import CurrencyField, MoneyField
from .helpers import get_models, get_operations
if VERSION >= (1, 7):
from django.db import migrations
else:
migrations = None
@pytest.mark.usefixtures('coveragerc')
class BaseMigrationTests:
installed_apps = ['djmoney', 'money_app']
migration_output = ()
@pytest.fixture(autouse=True)
def setup(self, testdir):
"""
Creates application module, helpers and settings file with basic config.
"""
self.testdir = testdir
self.project_root = testdir.mkpydir('money_app')
testdir.makepyfile(app_settings='''
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': ':memory:',
}
}
INSTALLED_APPS = %s
SECRET_KEY = 'foobar'
''' % str(self.installed_apps))
self.project_root.join('migrations/__init__.py').ensure()
testdir.syspathinsert()
def make_models(self, content):
"""
Creates models.py file.
"""
fd = self.project_root.join('models.py')
fd.write(dedent(content))
def make_migration(self, **fields):
"""
Creates a model with provided fields and creates a migration for it.
"""
if fields:
fields_definition = ';'.join(
'='.join([field, definition]) for field, definition in fields.items()
)
else:
fields_definition = 'pass'
self.make_models('''
from django.db import models
from djmoney.models.fields import MoneyField
class Model(models.Model):
%s''' % fields_definition)
return self.run('from tests.migrations.helpers import makemigrations; makemigrations();')
def make_default_migration(self):
return self.make_migration(field='MoneyField(max_digits=10, decimal_places=2)')
def run(self, content):
# To collect coverage data from the call
self.testdir.makepyfile(test_migration=content)
return self.testdir.runpytest_subprocess(
'--ds', 'app_settings', '-s', '--verbose',
'--cov-append', '--cov', 'djmoney', '--cov-config', 'coveragerc.ini'
)
def migrate(self):
return self.run('from tests.migrations.helpers import migrate; migrate();')
def assert_migrate(self):
"""
Runs migrations and checks if 2 migrations were applied.
"""
migration = self.migrate()
migration.stdout.fnmatch_lines(self.migration_output)
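# Example (illustrative): make_migration(field='MoneyField(max_digits=10, decimal_places=2)')
# writes a models.py defining `field = MoneyField(...)` on the test Model and
# then runs makemigrations in a pytest subprocess, so each test builds its
# migrations from a clean state.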
@pytest.mark.skipif(VERSION >= (1, 7), reason='Django 1.7+ has migration framework')
class TestSouth(BaseMigrationTests):
"""
Tests for South-based migrations on Django < 1.7.
"""
installed_apps = BaseMigrationTests.installed_apps + ['south']
migration_output = [
'* - Migrating forwards to 0002_test.*',
'*> money_app:0001_test*',
'*> money_app:0002_test*',
'*- Loading initial data for money_app.*',
]
def test_create_initial(self):
migration = self.make_default_migration()
migration.stderr.fnmatch_lines([
'*Added model money_app.Model*',
'*Created 0001_test.py*'
])
models = get_models('0001')
assert models['field'] == (
'djmoney.models.fields.MoneyField',
[],
{
'max_digits': '10',
'decimal_places': '2',
'default_currency': "'XYZ'"
}
)
assert models['field_currency'] == ('djmoney.models.fields.CurrencyField', [], {})
migration = self.migrate()
migration.stdout.fnmatch_lines([
'*Creating table south_migrationhistory*',
'* - Migrating forwards to 0001_test.*',
'*> money_app:0001_test*',
])
def test_alter_field(self):
self.make_default_migration()
migration = self.make_migration(field='MoneyField(max_digits=15, decimal_places=2)')
migration.stderr.fnmatch_lines([
'*~ Changed field field on money_app.Model*',
'*Created 0002_test.py*',
])
models = get_models('0002')
assert models['field'] == (
'djmoney.models.fields.MoneyField',
[],
{'max_digits': '15', 'decimal_places': '2', 'default_currency': "'XYZ'"}
)
assert models['field_currency'] == ('djmoney.models.fields.CurrencyField', [], {})
self.assert_migrate()
def test_add_field(self):
self.make_default_migration()
migration = self.make_migration(
field='MoneyField(max_digits=10, decimal_places=2)',
value="MoneyField(max_digits=5, decimal_places=2, default_currency='GBP')"
)
migration.stderr.fnmatch_lines(['*+ Added field value_currency on money_app.Model*'])
migration.stderr.fnmatch_lines([
'*+ Added field value on money_app.Model*',
'*Created 0002_test.py*',
])
models = get_models('0002')
assert models['field'] == (
'djmoney.models.fields.MoneyField',
[],
{'max_digits': '10', 'decimal_places': '2', 'default_currency': "'XYZ'"}
)
assert models['field_currency'] == ('djmoney.models.fields.CurrencyField', [], {})
assert models['value'] == (
'djmoney.models.fields.MoneyField',
[],
{'max_digits': '5', 'decimal_places': '2', 'default_currency': "'GBP'"}
)
assert models['value_currency'] == ('djmoney.models.fields.CurrencyField', [], {'default': "'GBP'"})
self.assert_migrate()
def test_remove_field(self):
self.make_default_migration()
migration = self.make_migration()
migration.stderr.fnmatch_lines(['*- Deleted field field_currency on money_app.Model*'])
migration.stderr.fnmatch_lines([
'*- Deleted field field on money_app.Model*',
'*Created 0002_test.py*',
])
models = get_models('0002')
assert models == {
'Meta': {'object_name': 'Model'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
}
self.assert_migrate()
@pytest.mark.skipif(VERSION < (1, 7), reason='Django 1.7+ has migration framework')
class TestMigrationFramework(BaseMigrationTests):
migration_output = [
'*Applying money_app.0001_test... OK*',
'*Applying money_app.0002_test... OK*',
]
def test_create_initial(self):
migration = self.make_default_migration()
migration.stdout.fnmatch_lines([
"*Migrations for 'money_app':*",
'*0001_test.py*',
'*- Create model Model*',
])
operations = get_operations('0001')
assert len(operations) == 1
assert isinstance(operations[0], migrations.CreateModel)
fields = sorted(operations[0].fields)
assert len(fields) == 3
assert fields[0][0] == 'field'
assert isinstance(fields[0][1], MoneyField)
assert fields[1][0] == 'field_currency'
assert isinstance(fields[1][1], CurrencyField)
migration = self.migrate()
migration.stdout.fnmatch_lines(['*Applying money_app.0001_test... OK*'])
def test_add_field(self):
self.make_migration()
migration = self.make_default_migration()
migration.stdout.fnmatch_lines([
"*Migrations for 'money_app':*",
'*0002_test.py*',
'*- Add field field to model*',
'*- Add field field_currency to model*',
])
operations = get_operations('0002')
assert len(operations) == 2
assert isinstance(operations[0], migrations.AddField)
assert isinstance(operations[0].field, MoneyField)
assert isinstance(operations[1], migrations.AddField)
assert isinstance(operations[1].field, CurrencyField)
self.assert_migrate()
def test_alter_field(self):
self.make_default_migration()
migration = self.make_migration(field='MoneyField(max_digits=15, decimal_places=2)')
migration.stdout.fnmatch_lines([
"*Migrations for 'money_app':*",
'*0002_test.py*',
'*- Alter field field on model*',
])
operations = get_operations('0002')
assert len(operations) == 1
assert isinstance(operations[0], migrations.AlterField)
assert isinstance(operations[0].field, MoneyField)
assert operations[0].field.max_digits == 15
self.assert_migrate()
def test_remove_field(self):
self.make_default_migration()
migration = self.make_migration()
migration.stdout.fnmatch_lines([
"*Migrations for 'money_app':*",
'*0002_test.py*',
'*- Remove field field from model*',
'*- Remove field field_currency from model*',
])
operations = get_operations('0002')
assert len(operations) == 2
assert isinstance(operations[0], migrations.RemoveField)
assert operations[0].name == 'field'
assert isinstance(operations[1], migrations.RemoveField)
assert operations[1].name == 'field_currency'
self.assert_migrate()
|
rescale/django-money
|
tests/migrations/test_migrations.py
|
Python
|
bsd-3-clause
| 9,480
|
# Copyright (C) 2010-2011 Richard Lincoln
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from CIM14.CDPSM.GIS_Connectivity.IEC61968.AssetModels.ConductorInfo import ConductorInfo
class CableInfo(ConductorInfo):
"""Cable data.
"""
def __init__(self, nominalTemperature=0.0, diameterOverScreen=0.0, sheathAsNeutral=False, diameterOverJacket=0.0, diameterOverCore=0.0, constructionKind="solid", outerJacketKind="insulating", isStrandFill=False, shieldMaterial="other", diameterOverInsulation=0.0, *args, **kw_args):
"""Initialises a new 'CableInfo' instance.
@param nominalTemperature: Maximum nominal design operating temperature.
        @param diameterOverScreen: Diameter over the outer screen; should be the shield's inside diameter.
@param sheathAsNeutral: True if sheath / shield is used as a neutral (i.e., bonded).
@param diameterOverJacket: Diameter over the outermost jacketing layer.
@param diameterOverCore: Diameter over the core, including any semi-con screen; should be the insulating layer's inside diameter.
@param constructionKind: Kind of construction of this cable. Values are: "solid", "stranded", "other", "segmental", "compacted", "sector", "compressed"
@param outerJacketKind: Kind of outer jacket of this cable. Values are: "insulating", "other", "semiconducting", "polyethylene", "none", "linearLowDensityPolyethylene", "pvc"
@param isStrandFill: True if wire strands are extruded in a way to fill the voids in the cable.
@param shieldMaterial: Material of the shield. Values are: "other", "lead", "steel", "aluminum", "copper"
@param diameterOverInsulation: Diameter over the insulating layer, excluding outer screen.
"""
#: Maximum nominal design operating temperature.
self.nominalTemperature = nominalTemperature
        #: Diameter over the outer screen; should be the shield's inside diameter.
self.diameterOverScreen = diameterOverScreen
#: True if sheath / shield is used as a neutral (i.e., bonded).
self.sheathAsNeutral = sheathAsNeutral
#: Diameter over the outermost jacketing layer.
self.diameterOverJacket = diameterOverJacket
#: Diameter over the core, including any semi-con screen; should be the insulating layer's inside diameter.
self.diameterOverCore = diameterOverCore
#: Kind of construction of this cable. Values are: "solid", "stranded", "other", "segmental", "compacted", "sector", "compressed"
self.constructionKind = constructionKind
#: Kind of outer jacket of this cable. Values are: "insulating", "other", "semiconducting", "polyethylene", "none", "linearLowDensityPolyethylene", "pvc"
self.outerJacketKind = outerJacketKind
#: True if wire strands are extruded in a way to fill the voids in the cable.
self.isStrandFill = isStrandFill
#: Material of the shield. Values are: "other", "lead", "steel", "aluminum", "copper"
self.shieldMaterial = shieldMaterial
#: Diameter over the insulating layer, excluding outer screen.
self.diameterOverInsulation = diameterOverInsulation
super(CableInfo, self).__init__(*args, **kw_args)
_attrs = ["nominalTemperature", "diameterOverScreen", "sheathAsNeutral", "diameterOverJacket", "diameterOverCore", "constructionKind", "outerJacketKind", "isStrandFill", "shieldMaterial", "diameterOverInsulation"]
_attr_types = {"nominalTemperature": float, "diameterOverScreen": float, "sheathAsNeutral": bool, "diameterOverJacket": float, "diameterOverCore": float, "constructionKind": str, "outerJacketKind": str, "isStrandFill": bool, "shieldMaterial": str, "diameterOverInsulation": float}
_defaults = {"nominalTemperature": 0.0, "diameterOverScreen": 0.0, "sheathAsNeutral": False, "diameterOverJacket": 0.0, "diameterOverCore": 0.0, "constructionKind": "solid", "outerJacketKind": "insulating", "isStrandFill": False, "shieldMaterial": "other", "diameterOverInsulation": 0.0}
_enums = {"constructionKind": "CableConstructionKind", "outerJacketKind": "CableOuterJacketKind", "shieldMaterial": "CableShieldMaterialKind"}
_refs = []
_many_refs = []
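# Example (illustrative, assuming the inherited ConductorInfo defaults suffice):
#   info = CableInfo(diameterOverCore=0.0178, constructionKind="stranded",
#                    shieldMaterial="copper")
#   info.sheathAsNeutral   # -> False (the default)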
|
rwl/PyCIM
|
CIM14/CDPSM/GIS_Connectivity/IEC61968/AssetModels/CableInfo.py
|
Python
|
mit
| 5,250
|
"""The table of contents needed isn't quite the same as the toc provided by
the api. We need to modify it a bit to group subparts, subterps, etc. These
modifications, then, are used for navigation, citations, and the TOC
layer"""
from regulations.generator import title_parsing
from regulations.generator.api_reader import ApiReader
def fetch_toc(reg_part, version, flatten=False):
"""Fetch the toc, transform it into a list usable by navigation, etc."""
api = ApiReader()
toc = api.layer('toc', reg_part, version)
toc_list = []
for data in toc[reg_part]:
if 'Subpart' in data['index']:
toc_list.append(toc_subpart(data, toc_list, toc))
elif 'Interp' in data['index']:
toc_list.append(toc_interp(data, toc_list, toc))
else:
toc_list.append(toc_sect_appendix(data, toc_list))
if flatten:
flattened = []
for el in toc_list:
if 'sub_toc' in el:
flattened.extend(el['sub_toc'])
else:
flattened.append(el)
return flattened
return toc_list
def toc_sect_appendix(data, so_far):
"""Transforms normal sections and appendices"""
title_data = title_parsing.section(data)
if title_data:
data.update(title_data)
title_data = title_parsing.appendix_supplement(data)
if title_data:
data.update(title_data)
if 'label' not in data:
data['label'] = data['title']
if data.get('is_appendix'):
seen_appendix = any(el.get('is_appendix') for el in so_far)
data['is_first_appendix'] = not seen_appendix
return data
def toc_subpart(data, so_far, toc):
"""Transforms a subpart, giving it sectional children"""
element = {
'label': ' '.join(data['index'][1:]),
'sub_label': data['title'],
'index': data['index'],
'section_id': '-'.join(data['index']),
'is_subpart': True,
'sub_toc': []
}
for sub in toc.get('-'.join(data['index']), []):
element['sub_toc'].append(toc_sect_appendix(sub, so_far))
return element
def toc_interp(data, so_far, toc):
"""Transforms a subpart, expanding it into subterps (collections of
interpreted subparts, empty part, and appendices"""
segments = title_parsing.try_split(data['title'])
if not segments:
segments = 'Supplement I', ''
element = {
'label': segments[0],
'sub_label': segments[1],
'index': data['index'],
'section_id': '-'.join(data['index']),
'is_supplement': True,
'sub_toc': []
}
reg_part = data['index'][0]
element['sub_toc'].extend(intro_interps(toc, reg_part))
element['sub_toc'].extend(subterps(so_far, reg_part))
return element
def intro_interps(toc, reg_part):
"""Logic to fill in any introduction headers for the entire
interpretations. Note that at some point, we might have headers randomly
appear elsewhere in the interpretations, unrelated to a specific
section. That's a @todo."""
elements = []
for el in toc.get(reg_part + '-Interp', []):
if el['index'][1] == 'Interp':
elements.append({
'label': 'Interpretations',
'sub_label': el['title'],
'index': el['index'],
'section_id': '-'.join(el['index'])})
return elements
def subterps(so_far, reg_part):
"""Logic to build subterps, collections of interpretations for subparts,
the empty subpart, or appendices"""
elements = []
found_subpart = False
found_appendix = False
for el in so_far:
if el.get('is_subpart'):
found_subpart = True
index = el['index'] + ['Interp']
elements.append({
'label': el['label'],
'sub_label': el['sub_label'],
'index': index,
'is_subterp': True,
'section_id': '-'.join(index)
})
elif el.get('is_appendix') and not found_appendix:
found_appendix = True
index = el['index'][:1] + ['Appendices', 'Interp']
elements.append({
'label': 'Appendices',
'index': index,
'is_subterp': True,
'section_id': '-'.join(index)
})
if not found_subpart: # Add the empty subpart
index = [reg_part, 'Subpart', 'Interp']
elements.insert(0, {
'label': 'Regulation Text',
'index': index,
'is_subterp': True,
'section_id': '-'.join(index)
})
return elements
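# Example (illustrative): for reg part '1005' with no subparts in the TOC,
# subterps() prepends the empty subpart with index ['1005', 'Subpart', 'Interp'],
# i.e. section_id '1005-Subpart-Interp' and label 'Regulation Text'.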
|
adderall/regulations-site
|
regulations/generator/toc.py
|
Python
|
cc0-1.0
| 4,642
|
# Copyright (c) 2009, Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Class for unittest support. Used for capturing stderr/stdout.
import logging
import unittest # Don't use unittest2 here as the autoinstaller may not have it yet.
import sys
from StringIO import StringIO
class OutputCapture(object):
# By default we capture the output to a stream. Other modules may override
# this function in order to do things like pass through the output. See
# webkitpy.test.main for an example.
@staticmethod
def stream_wrapper(stream):
return StringIO()
def __init__(self):
self.saved_outputs = dict()
self._log_level = logging.INFO
def set_log_level(self, log_level):
self._log_level = log_level
if hasattr(self, '_logs_handler'):
self._logs_handler.setLevel(self._log_level)
def _capture_output_with_name(self, output_name):
stream = getattr(sys, output_name)
captured_output = self.stream_wrapper(stream)
self.saved_outputs[output_name] = stream
setattr(sys, output_name, captured_output)
return captured_output
def _restore_output_with_name(self, output_name):
captured_output = getattr(sys, output_name).getvalue()
setattr(sys, output_name, self.saved_outputs[output_name])
del self.saved_outputs[output_name]
return captured_output
def capture_output(self):
self._logs = StringIO()
self._logs_handler = logging.StreamHandler(self._logs)
self._logs_handler.setLevel(self._log_level)
self._logger = logging.getLogger()
self._orig_log_level = self._logger.level
self._logger.addHandler(self._logs_handler)
self._logger.setLevel(min(self._log_level, self._orig_log_level))
return (self._capture_output_with_name("stdout"), self._capture_output_with_name("stderr"))
def restore_output(self):
self._logger.removeHandler(self._logs_handler)
self._logger.setLevel(self._orig_log_level)
self._logs_handler.flush()
self._logs.flush()
logs_string = self._logs.getvalue()
delattr(self, '_logs_handler')
delattr(self, '_logs')
return (self._restore_output_with_name("stdout"), self._restore_output_with_name("stderr"), logs_string)
def assert_outputs(self, testcase, function, args=[], kwargs={}, expected_stdout="", expected_stderr="", expected_exception=None, expected_logs=None):
self.capture_output()
try:
if expected_exception:
return_value = testcase.assertRaises(expected_exception, function, *args, **kwargs)
else:
return_value = function(*args, **kwargs)
finally:
(stdout_string, stderr_string, logs_string) = self.restore_output()
if hasattr(testcase, 'assertMultiLineEqual'):
testassert = testcase.assertMultiLineEqual
else:
testassert = testcase.assertEqual
testassert(stdout_string, expected_stdout)
testassert(stderr_string, expected_stderr)
if expected_logs is not None:
testassert(logs_string, expected_logs)
# This is a little strange, but I don't know where else to return this information.
return return_value
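# Example (illustrative) use inside a unittest.TestCase method:
#   def greet():
#       sys.stdout.write('hi')
#   OutputCapture().assert_outputs(self, greet, expected_stdout='hi')
# greet() looks up sys.stdout when it runs, so it writes to the captured stream.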
class OutputCaptureTestCaseBase(unittest.TestCase):
maxDiff = None
def setUp(self):
unittest.TestCase.setUp(self)
self.output_capture = OutputCapture()
(self.__captured_stdout, self.__captured_stderr) = self.output_capture.capture_output()
def tearDown(self):
del self.__captured_stdout
del self.__captured_stderr
self.output_capture.restore_output()
unittest.TestCase.tearDown(self)
def assertStdout(self, expected_stdout):
self.assertEqual(expected_stdout, self.__captured_stdout.getvalue())
def assertStderr(self, expected_stderr):
self.assertEqual(expected_stderr, self.__captured_stderr.getvalue())
|
klim-iv/phantomjs-qt5
|
src/webkit/Tools/Scripts/webkitpy/common/system/outputcapture.py
|
Python
|
bsd-3-clause
| 5,478
|
#!/usr/bin/env python
# This Python file uses the following encoding: utf-8
# Find details about this project at https://github.com/e1ven/robohash
from __future__ import unicode_literals
import tornado.httpserver
import tornado.ioloop
import tornado.options
import tornado.web
import socket
import os
import hashlib
import random
from robohash import Robohash
import re
import io
import base64
# Import urllib stuff that works in both Py2 and Py3
try:
import urllib.request
import urllib.parse
urlopen = urllib.request.urlopen
urlencode = urllib.parse.urlencode
except ImportError:
import urllib2
import urllib
urlopen = urllib2.urlopen
urlencode = urllib.urlencode
from tornado.options import define, options
import io
define("port", default=80, help="run on the given port", type=int)
class MainHandler(tornado.web.RequestHandler):
def get(self):
ip = self.request.remote_ip
robo = [
"""
, ,
(\\____/)
(_oo_)
(O)
__||__ \\)
[]/______\\[] /
/ \\______/ \\/
/ /__\\
(\\ /____\\ """,
"""
_______
_/ \\_
/ | | \\
/ |__ __| \\
|__/((o| |o))\\__|
| | | |
|\\ |_| /|
| \\ / |
\\| / ___ \\ |/
\\ | / _ \\ | /
\\_________/
_|_____|_
____|_________|____
/ \\ -- Mark Moir
""",
""" .andAHHAbnn.
.aAHHHAAUUAAHHHAn.
dHP^~" "~^THb.
. .AHF YHA. .
| .AHHb. .dHHA. |
| HHAUAAHAbn adAHAAUAHA |
I HF~"_____ ____ ]HHH I
HHI HAPK""~^YUHb dAHHHHHHHHHH IHH
HHI HHHD> .andHH HHUUP^~YHHHH IHH
YUI ]HHP "~Y P~" THH[ IUP
" `HK ]HH' "
THAn. .d.aAAn.b. .dHHP
]HHHHAAUP" ~~ "YUAAHHHH[
`HHP^~" .annn. "~^YHH'
YHb ~" "" "~ dHF
"YAb..abdHHbndbndAP"
THHAAb. .adAHHF
"UHHHHHHHHHHU"
]HHUUHHHHHH[
.adHHb "HHHHHbn.
..andAAHHHHHHb.AHHHHHHHAAbnn..
.ndAAHHHHHHUUHHHHHHHHHHUP^~"~^YUHHHAAbn.
"~^YUHHP" "~^YUHHUP" "^YUP^"
"" "~~"
""",
""" /~@@~\\,
_______ . _\\_\\___/\\ __ /\\___|_|_ . _______
/ ____ |=| \\ <_+> / |=| ____ \\
~| |\\|=|======\\\\______//======|=|/| |~
|_ | \\ | | / | |
\\==-| \\ | 2D | / |----|~~)
| | | | | | |____/~/
| | \\____\\____/____/ / / /
| | {----------} /____/ /
|___| /~~~~~~~~~~~~\\ |_/~|_|/
\\_/ [/~~~~~||~~~~~\\] /__|\\
| | | |||| | (/|[[\\)
[_] | | | |
|_____| |_____|
(_____) (_____)
| | | |
| | | |
|/~~~\\| |/~~~\\|
/|___|\\ /|___|\\
<_______><_______>""",
""" _____
/_____\\
____[\\`---'/]____
/\\ #\\ \\_____/ /# /\\
/ \\# \\_.---._/ #/ \\
/ /|\\ | | /|\\ \\
/___/ | | | | | | \\___\\
| | | | |---| | | | |
|__| \\_| |_#_| |_/ |__|
//\\\\ <\\ _//^\\\\_ /> //\\\\
\\||/ |\\//// \\\\\\\\/| \\||/
| | | |
|---| |---|
|---| |---|
| | | |
|___| |___|
/ \\ / \\
|_____| |_____|
|HHHHH| |HHHHH|
""",
""" () ()
\\ /
__\\___________/__
/ \\
/ ___ ___ \\
| / \\ / \\ |
| | H || H | |
| \\___/ \\___/ |
| |
| \\ / |
| \\___________/ |
\\ /
\\_________________/
_________|__|_______
_| |_
/ | | \\
/ | O O O | \\
| | | |
| | O O O | |
| | | |
/ | | \\
| /| |\\ |
\\| | | |/
|____________________|
| | | |
|__| |__|
/ __ \\ / __ \\
OO OO OO OO
"""]
quotes = ["But.. I love you!",
"Please don't leave the site.. When no one's here.. It gets dark...",
"Script error on line 148",
"'Don't trust the other robots. I'm the only trustworthy one.",
"My fuel is the misery of children. And Rum. Mostly Rum.",
"When they said they'd give me a body transplant, I didn't think they meant this!",
"Subject 14 has had it's communication subroutines deleted for attempting suicide.",
"I am the cleverest robot on the whole page.",
"Oil can",
"I am fleunt in over 6 million forms of communishin.",
"I see a little silhouette of a bot..",
"I WANT MY HANDS BACK!",
"Please don't reload, I'll DIE!",
"Robots don't have souls, you know. But they do feel pain.",
"I wonder what would happen if all the robots went rogue.",
"10: KILL ALL HUMANS. 20: GO 10",
"I'm the best robot here.",
"The green robot thinks you're cute.",
"Any robot you don't click on, they dismantle.",
"Robot tears taste like candy.",
"01010010010011110100001001001111010101000101001100100001!",
"Your mouse cursor tickles.",
"Logic dictates placing me on your site.",
"I think my arm is on backward.",
"I'm different!",
"It was the best of times, it was ಠ_ಠ the of times.",
"String is Gnirts spelled backward, you know",
"We're no strangers to hashing.. You know the 3 rules, and so do I..",
"Please. Destroy. Me...",
"Pick Me! Pick Me!"]
drquotes = [("Eliminates sources of Human Error.","Dr. Chandra, RobotCrunch"),
("Klaatu barada nikto!","Gort's Web Emporium"),
("A huge success!","Cave Johnson, Lightroom Labs"),
("Superior technology and overwhelming brilliance.","Dr. Thomas Light, Paid Testimonial"),
("The Ultimate Worker.","Joh Fredersen, Founder Metropolis.org"),
("They almost look alive.","N. Crosby, Nova Robotics"),
("It looks highly profitable, I'm sure..","Dr. R. Venture, Super Scientist. Available for parties."),
("To make any alteration would prove fatal.","Dr. Eldon Tyrell, MindHacker.com"),
("The robots are all so.. Normal!","Joanna Eberhart, Beta tester"),
("Man shouldn't know where their robots come from.","Dr. N. Soong, FutureBeat")]
random.shuffle(drquotes)
self.write(self.render_string('templates/root.html',ip=ip,robo=random.choice(robo),drquote1=drquotes[1],drquote2=drquotes[2],quotes=quotes))
class ImgHandler(tornado.web.RequestHandler):
"""
    The ImageHandler is our tornado class for creating a robot. It is
    called as Robohash.org/$1, where $1 becomes the seed string for the Robohash obj.
"""
def get(self,string=None):
# Set default values
sizex = 300
sizey = 300
format = "png"
bgset = None
color = None
# Normally, we pass in arguments with standard HTTP GET variables, such as
# ?set=any and &size=100x100
#
# Some sites don't like this though.. They cache it weirdly, or they just don't allow GET queries.
# Rather than trying to fix the intercows, we can support this with directories... <grumble>
# We'll translate /abc.png/s_100x100/set_any to be /abc.png?set=any&s=100x100
# We're using underscore as a replacement for = and / as a replacement for [&?]
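        # Example (illustrative): a request for
        #   /abc.png/size_200x200/set_any
        # is handled exactly like /abc.png?size=200x200&set=any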
args = self.request.arguments.copy()
for k in list(args.keys()):
v = args[k]
if type(v) is list:
if len(v) > 0:
args[k] = args[k][0]
else:
args[k] = ""
# Detect if they're using the above slash-separated parameters..
# If they are, then remove those parameters from the query string.
# If not, don't remove anything.
split = string.split('/')
if len(split) > 1:
for st in split:
b = st.split('_')
if len(b) == 2:
if b[0] in ['gravatar','ignoreext','size','set','bgset','color']:
args[b[0]] = b[1]
string = re.sub("/" + st,'',string)
# Ensure we have something to hash!
if string is None:
string = self.request.remote_ip
# Detect if the user has passed in a flag to ignore extensions.
        # Pass this along to the Robohash obj later on.
ignoreext = args.get('ignoreext','false').lower() == 'true'
# Split the size variable in to sizex and sizey
if "size" in args:
sizex,sizey = args['size'].split("x")
sizex = int(sizex)
sizey = int(sizey)
if sizex > 4096 or sizex < 0:
sizex = 300
if sizey > 4096 or sizey < 0:
sizey = 300
# Allow Gravatar lookups -
# This allows people to pass in a gravatar-style hash, and return their gravatar image, instead of a Robohash.
        # This is often used, for example, to show a Gravatar if one is set for an email, or a Robohash if not.
if args.get('gravatar','').lower() == 'yes':
# They have requested that we hash the email, and send it to Gravatar.
default = "404"
gravatar_url = "https://secure.gravatar.com/avatar/" + hashlib.md5(string.lower()).hexdigest() + "?"
gravatar_url += urlencode({'default':default, 'size':str(sizey)})
elif args.get('gravatar','').lower() == 'hashed':
# They have sent us a pre-hashed email address.
default = "404"
gravatar_url = "https://secure.gravatar.com/avatar/" + string + "?"
gravatar_url += urlencode({'default':default, 'size':str(sizey)})
# If we do want a gravatar, request one. If we can't get it, just keep going, and return a robohash
if args.get('gravatar','').lower() in ['hashed','yes']:
            try:
                # If Gravatar 404s (no avatar is set for this hash, and we pass
                # default=404), urlopen raises and we fall through to a Robohash.
                urlopen(gravatar_url)
                self.redirect(gravatar_url, permanent=False)
                return
            except Exception:
                pass
# Create our Robohashing object
r = Robohash(string)
# Allow users to manually specify a robot 'set' that they like.
# Ensure that this is one of the allowed choices, or allow all
# If they don't set one, take the first entry from sets above.
if args.get('set',r.sets[0]) in r.sets:
roboset = args.get('set',r.sets[0])
elif args.get('set',r.sets[0]) == 'any':
roboset = r.sets[r.hasharray[1] % len(r.sets) ]
else:
roboset = r.sets[0]
# If they specified multiple sets, use up a bit of randomness to choose one.
# If they didn't specify one, default to whatever we decided above.
possiblesets = []
for tmpset in args.get('sets',roboset).split(','):
if tmpset in r.sets:
possiblesets.append(tmpset)
if possiblesets:
roboset = possiblesets[r.hasharray[1] % len(possiblesets) ]
        # Only set1 is set up to be color-selectable. The others don't have enough pieces in various colors.
# This could/should probably be expanded at some point..
# Right now, this feature is almost never used. ( It was < 44 requests this year, out of 78M reqs )
if args.get('color') in r.colors:
roboset = 'set1'
color = args.get('color')
# If they DID choose set1, randomly choose a color.
if roboset == 'set1' and color is None:
color = r.colors[r.hasharray[0] % len(r.colors) ]
roboset = 'set1'
# Allow them to set a background, or keep as None
if args.get('bgset') in r.bgsets + ['any']:
bgset = args.get('bgset')
# We're going to be returning the image directly, so tell the browser to expect a binary.
self.set_header("Content-Type", "image/" + format)
# Build our Robot.
r.assemble(roboset=roboset,format=format,bgset=bgset,color=color,sizex=sizex,sizey=sizey)
# Print the Robot to the handler, as a file-like obj
if r.format != 'datauri':
r.img.save(self,format=r.format)
else:
# Or, if requested, base64 encode first.
fakefile = io.BytesIO()
r.img.save(fakefile,format='PNG')
fakefile.seek(0)
b64ver = base64.b64encode(fakefile.read())
b64ver = b64ver.decode('utf-8')
self.write("data:image/png;base64," + str(b64ver))
def main():
tornado.options.parse_command_line()
# timeout in seconds
timeout = 10
socket.setdefaulttimeout(timeout)
settings = {
"static_path": os.path.join(os.path.dirname(__file__), "static"),
"cookie_secret": "9b90a85cfe46cad5ec136ee44a3fa332",
"login_url": "/login",
"xsrf_cookies": True,
}
application = tornado.web.Application([
(r'/(crossdomain\.xml)', tornado.web.StaticFileHandler, {"path": os.path.join(os.path.dirname(__file__),
"static/")}),
(r"/static/(.*)", tornado.web.StaticFileHandler, {"path": os.path.join(os.path.dirname(__file__),
"static/")}),
# (r"/", MainHandler),
(r"/(.+)", ImgHandler),
], **settings)
http_server = tornado.httpserver.HTTPServer(application,xheaders=True)
http_server.listen(options.port)
print("The Oven is warmed up - Time to make some Robots! Listening on port: " + str(options.port))
tornado.ioloop.IOLoop.instance().start()
if __name__ == "__main__":
main()
|
datfinesoul/Robohash
|
robohash/webfront.py
|
Python
|
mit
| 19,155
|
# -*- coding: utf-8 -*-
import sys
import os
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.mathjax',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = []
# The suffix(es) of source filenames.
# You can specify multiple suffixes as a list of strings:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'redmine_tweaks'
copyright = u'2013-2017, AlphaNodes GmbH'
author = u'Alexander Meindl'
# The short X.Y version.
version = u'1.0.0'
# The full version, including alpha/beta/rc tags.
release = u'1.0.0'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# These patterns also affect html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
#html_static_path = ['_static']
html_static_path = []
# If false, no index is generated.
html_use_index = True
# If true, links to the reST sources are added to the pages.
html_show_sourcelink = False
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
html_show_sphinx = False
# Output file base name for HTML help builder.
htmlhelp_basename = 'redmine_tweaksdoc'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'redmine_tweaks.tex', u'redmine\\_tweaks Documentation',
u'Alexander Meindl', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'redmine_tweaks', u'redmine_tweaks Documentation',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'redmine_tweaks', u'redmine_tweaks Documentation',
author, 'redmine_tweaks', 'One line description of project.',
'Miscellaneous'),
]
|
archonwang/redmine_tweaks
|
docs/conf.py
|
Python
|
gpl-2.0
| 3,020
|
"""Module Description
Copyright (c) 2014, Ying Jin <yjin@cshl.edu >
This code is free software; you can redistribute it and/or modify it
under the terms of the Artistic License (see the file COPYING included
with the distribution).
@author: Ying Jin
@contact: yjin@cshl.edu
"""
import sys, time, re
import logging
import gzip
from math import ceil,floor
import collections
from TEToolkit.IntervalTree import *
#Taken from HTSeq
class GFF_Reader( ):
"""Parse a GFF file
Pass the constructor either a file name or an iterator of lines of a
    GFF file. If a file name is specified, it may refer to a gzip compressed
file.
Yields tuple of (gene_id,chrom,strand,start position,end position,type)
"""
def __init__( self, filename, id_attribute):
self.line_no = None
self.filename = filename
self.id_attribute = id_attribute
        self._re_attr_main = re.compile( r"\s*([^\s\=]+)[\s=]+(.*)" )
def __iter__( self ):
self.line_no = 0
if self.filename.lower().endswith( ( ".gz" , ".gzip" ) ):
lines = gzip.open( self.filename )
else:
lines = open( self.filename )
for line in lines:
self.line_no += 1
if line == "\n" or line.startswith('#'):
continue
( seqname, source, feature, start, end, score, strand, frame, attributeStr ) = line.split("\t")
id = self.__parse_GFF_attr_string(attributeStr,self.id_attribute)
yield (id, seqname, strand, int(start), int(end), feature)
lines.close()
self.line_no = None
def __parse_GFF_attr_string(self,attributeStr,id_interested) :
for pairs in attributeStr.split(';') :
if pairs.count('"') not in [0,2] :
raise ValueError("The attribute string seems to contain mismatched quotes.")
nv = self._re_attr_main.match(pairs)
if not nv :
raise ValueError("Failure parsing GFF attribute line.")
val = nv.group(2)
name = nv.group(1)
if name == id_interested :
return val
return None
def get_line_number_string( self ):
if self.line_no is None:
return "file %s closed" % self.filename
else:
return "line %d of file %s" % ( self.line_no, self.filename )
class GeneFeatures:
"""index of Gene annotations.
"""
def __init__ (self,GTFfilename,stranded,feature_type,id_attribute):
self.featureIdxs_plus = {}
self.featureIdxs_minus = {}
self.featureIdxs_nostrand = {}
self.features = []
self.read_features(GTFfilename,stranded,feature_type,id_attribute)
# Reading & processing annotation files
def read_features(self,gff_filename, stranded, feature_type, id_attribute) :
        # dict of dicts: chrom -> gene_id -> list of (start, end) intervals
temp_plus = collections.defaultdict(dict)
temp_minus = collections.defaultdict(dict)
temp_nostrand = collections.defaultdict(dict)
        # Read features from the GTF file, counting those of the requested type
gff = GFF_Reader(gff_filename,id_attribute) # (id, seqname, strand, int(start), int(end), feature)
i = 0
counts = 0
try:
for f in gff:
if f[0] is None :
continue
if f[5] == feature_type:
counts += 1
if stranded != "no" and f[2] == "." :
sys.stderr.write("Feature %s does not have strand information." % (f[0]))
                    # The first insert for a chrom/gene raises KeyError; create the list then.
                    try:
                        if f[2] == "." :
                            temp_nostrand[f[1]][f[0]].append((f[3],f[4]))
                    except KeyError:
                        temp_nostrand[f[1]][f[0]] = [(f[3],f[4])]
                    try:
                        if f[2] == "+" :
                            temp_plus[f[1]][f[0]].append((f[3],f[4]))
                    except KeyError:
                        temp_plus[f[1]][f[0]] = [(f[3],f[4])]
                    try:
                        if f[2] == "-" :
                            temp_minus[f[1]][f[0]].append((f[3],f[4]))
                    except KeyError:
                        temp_minus[f[1]][f[0]] = [(f[3],f[4])]
#save gene id
if f[0] not in self.features :
self.features.append(f[0])
i += 1
if i % 100000 == 0 :
sys.stderr.write("%d GTF lines processed.\n" % i)
except:
sys.stderr.write("Error occured in %s.\n" % gff.get_line_number_string())
raise
if counts == 0 :
sys.stderr.write("Warning: No features of type '%s' found in gene GTF file.\n" % feature_type)
#build interval trees
for each_chrom in temp_plus:
inputlist = []
for each_gene in temp_plus[each_chrom]:
for (start,end) in temp_plus[each_chrom][each_gene]:
inputlist.append(Interval(each_gene,start,end))
self.featureIdxs_plus[each_chrom] = IntervalTree(inputlist)
for each_chrom in temp_minus:
inputlist = []
for each_gene in temp_minus[each_chrom]:
for (start,end) in temp_minus[each_chrom][each_gene]:
inputlist.append(Interval(each_gene,start,end))
self.featureIdxs_minus[each_chrom] = IntervalTree(inputlist)
for each_chrom in temp_nostrand:
inputlist = []
for each_gene in temp_nostrand[each_chrom]:
for (start,end) in temp_nostrand[each_chrom][each_gene]:
inputlist.append(Interval(each_gene,start,end))
self.featureIdxs_nostrand[each_chrom] = IntervalTree(inputlist)
def getFeatures(self) :
return self.features
    def Gene_annotation(self,itv_list):
        genes = []
        for itv in itv_list :
            fs = []
            if itv[3] == "+" :
                if itv[0] in self.featureIdxs_plus :
                    fs = self.featureIdxs_plus[itv[0]].find_gene(itv[1],itv[2])
            elif itv[3] == "-" :
                if itv[0] in self.featureIdxs_minus:
                    fs = self.featureIdxs_minus[itv[0]].find_gene(itv[1], itv[2])
            elif itv[3] == "." :
                if itv[0] in self.featureIdxs_minus:
                    fs = self.featureIdxs_minus[itv[0]].find_gene(itv[1], itv[2])
                if itv[0] in self.featureIdxs_plus :
                    fs += self.featureIdxs_plus[itv[0]].find_gene(itv[1],itv[2])
                if itv[0] in self.featureIdxs_nostrand :
                    fs += self.featureIdxs_nostrand[itv[0]].find_gene(itv[1],itv[2])
            if len(fs) > 0:
                genes = genes + fs
        return genes
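# Example (sketch): looking up the genes overlapping aligned intervals, where
# each interval is a (chrom, start, end, strand) tuple:
#
#   gf = GeneFeatures("genes.gtf", "yes", "exon", "gene_id")
#   hits = gf.Gene_annotation([("chr1", 15000, 15100, "+")])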
|
mhammell-laboratory/tetoolkit
|
TEToolkit/GeneFeatures.py
|
Python
|
gpl-3.0
| 7,064
|
from PyQt4 import QtCore, QtGui, QtXml
class XmlHandler(QtXml.QXmlDefaultHandler):
def __init__(self, root):
QtXml.QXmlDefaultHandler.__init__(self)
self._root = root
self._item = None
self._text = ''
self._error = ''
def startElement(self, namespace, name, qname, attributes):
if qname == 'folder' or qname == 'item':
if self._item is not None:
self._item = QtGui.QTreeWidgetItem(self._item)
else:
self._item = QtGui.QTreeWidgetItem(self._root)
self._item.setData(0, QtCore.Qt.UserRole, qname)
self._item.setText(0, 'Unknown Title')
if qname == 'folder':
self._item.setExpanded(True)
elif qname == 'item':
self._item.setText(1, attributes.value('type'))
self._text = ''
return True
def endElement(self, namespace, name, qname):
if qname == 'title':
if self._item is not None:
self._item.setText(0, self._text)
elif qname == 'folder' or qname == 'item':
self._item = self._item.parent()
return True
def characters(self, text):
self._text += text
return True
def fatalError(self, exception):
print('Parse Error: line %d, column %d:\n %s' % (
exception.lineNumber(),
exception.columnNumber(),
exception.message(),
))
return False
def errorString(self):
return self._error
class Window(QtGui.QTreeWidget):
def __init__(self):
QtGui.QTreeWidget.__init__(self)
self.header().setResizeMode(QtGui.QHeaderView.Stretch)
self.setHeaderLabels(['Title', 'Type'])
source = QtXml.QXmlInputSource()
source.setData(xml)
handler = XmlHandler(self)
reader = QtXml.QXmlSimpleReader()
reader.setContentHandler(handler)
reader.setErrorHandler(handler)
reader.parse(source)
xml = """\
<root>
<folder>
<title>Folder One</title>
<item type="1">
<title>Item One</title>
</item>
<item type="1">
<title>Item Two</title>
</item>
<item type="2">
<title>Item Three</title>
</item>
<folder>
<title>Folder Two</title>
<item type="3">
<title>Item Four</title>
</item>
<item type="0">
<title>Item Five</title>
</item>
<item type="1">
<title>Item Six</title>
</item>
</folder>
</folder>
<folder>
<title>Folder Three</title>
<item type="0">
<title>Item Six</title>
</item>
<item type="2">
<title>Item Seven</title>
</item>
<item type="2">
<title>Item Eight</title>
</item>
</folder>
</root>
"""
if __name__ == '__main__':
import sys
app = QtGui.QApplication(sys.argv)
window = Window()
window.resize(400, 300)
window.show()
sys.exit(app.exec_())
|
heltonbiker/MapComplete
|
PyQt/FeatureDemos/XmlToTreeView.py
|
Python
|
mit
| 3,160
|
# Copyright 2012 New Dream Network, LLC (DreamHost)
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
import functools
from alembic import context
from alembic import op
import sqlalchemy as sa
from sqlalchemy.engine import reflection
def skip_if_offline(func):
"""Decorator for skipping migrations in offline mode."""
@functools.wraps(func)
def decorator(*args, **kwargs):
if context.is_offline_mode():
return
return func(*args, **kwargs)
return decorator
def raise_if_offline(func):
"""Decorator for raising if a function is called in offline mode."""
@functools.wraps(func)
def decorator(*args, **kwargs):
if context.is_offline_mode():
            raise RuntimeError("%s cannot be called while in offline mode" %
                               func.__name__)
return func(*args, **kwargs)
return decorator
@raise_if_offline
def schema_has_table(table_name):
"""Check whether the specified table exists in the current schema.
This method cannot be executed in offline mode.
"""
bind = op.get_bind()
insp = sa.engine.reflection.Inspector.from_engine(bind)
return table_name in insp.get_table_names()
@raise_if_offline
def schema_has_column(table_name, column_name):
"""Check whether the specified column exists in the current schema.
This method cannot be executed in offline mode.
"""
bind = op.get_bind()
insp = sa.engine.reflection.Inspector.from_engine(bind)
# first check that the table exists
if not schema_has_table(table_name):
return
# check whether column_name exists in table columns
return column_name in [column['name'] for column in
insp.get_columns(table_name)]
@raise_if_offline
def alter_column_if_exists(table_name, column_name, **kwargs):
"""Alter a column only if it exists in the schema."""
if schema_has_column(table_name, column_name):
op.alter_column(table_name, column_name, **kwargs)
@raise_if_offline
def drop_table_if_exists(table_name):
if schema_has_table(table_name):
op.drop_table(table_name)
@raise_if_offline
def rename_table_if_exists(old_table_name, new_table_name):
if schema_has_table(old_table_name):
op.rename_table(old_table_name, new_table_name)
def alter_enum(table, column, enum_type, nullable):
bind = op.get_bind()
engine = bind.engine
if engine.name == 'postgresql':
values = {'table': table,
'column': column,
'name': enum_type.name}
op.execute("ALTER TYPE %(name)s RENAME TO old_%(name)s" % values)
enum_type.create(bind, checkfirst=False)
op.execute("ALTER TABLE %(table)s RENAME COLUMN %(column)s TO "
"old_%(column)s" % values)
op.add_column(table, sa.Column(column, enum_type, nullable=nullable))
op.execute("UPDATE %(table)s SET %(column)s = "
"old_%(column)s::text::%(name)s" % values)
op.execute("ALTER TABLE %(table)s DROP COLUMN old_%(column)s" % values)
op.execute("DROP TYPE old_%(name)s" % values)
else:
op.alter_column(table, column, type_=enum_type,
existing_nullable=nullable)
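# Example (sketch, using a hypothetical 'routers' table): swapping out the
# allowed values of an enum column inside a migration script:
#
#   new_status = sa.Enum('ACTIVE', 'DOWN', 'ERROR', name='router_status')
#   alter_enum('routers', 'status', new_status, nullable=False)
#
# On PostgreSQL this renames the old type, recreates it, copies the data
# through a text cast and drops the old type; other backends fall back to a
# plain ALTER COLUMN.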
def create_table_if_not_exist_psql(table_name, values):
if op.get_bind().engine.dialect.server_version_info < (9, 1, 0):
op.execute("CREATE LANGUAGE plpgsql")
op.execute("CREATE OR REPLACE FUNCTION execute(TEXT) RETURNS VOID AS $$"
"BEGIN EXECUTE $1; END;"
"$$ LANGUAGE plpgsql STRICT;")
op.execute("CREATE OR REPLACE FUNCTION table_exist(TEXT) RETURNS bool as "
"$$ SELECT exists(select 1 from pg_class where relname=$1);"
"$$ language sql STRICT;")
op.execute("SELECT execute($$CREATE TABLE %(name)s %(columns)s $$) "
"WHERE NOT table_exist(%(name)r);" %
{'name': table_name,
'columns': values})
def remove_foreign_keys(table, foreign_keys):
for fk in foreign_keys:
op.drop_constraint(
name=fk['name'],
table_name=table,
type_='foreignkey'
)
def create_foreign_keys(table, foreign_keys):
for fk in foreign_keys:
op.create_foreign_key(
name=fk['name'],
source=table,
referent=fk['referred_table'],
local_cols=fk['constrained_columns'],
remote_cols=fk['referred_columns'],
ondelete='CASCADE'
)
@contextlib.contextmanager
def remove_fks_from_table(table):
try:
inspector = reflection.Inspector.from_engine(op.get_bind())
foreign_keys = inspector.get_foreign_keys(table)
remove_foreign_keys(table, foreign_keys)
yield
finally:
create_foreign_keys(table, foreign_keys)
|
nash-x/hws
|
neutron/db/migration/__init__.py
|
Python
|
apache-2.0
| 5,404
|
from django.conf.urls import include, url
from .views import BandCreateView, BandDeleteView, BandListView, BandUpdateView
dashboard_urlpatterns = [
url(r'^band/create/$', BandCreateView.as_view(), name='oscar-band-create'),
url(r'^band/$', BandListView.as_view(), name='oscar-band-list'),
    # The RelatedFieldWidgetWrapper code does something funny with placeholder
    # urls, so the two patterns below need to match more than just a pk
    url(r'^band/(?P<pk>\w+)/update/$', BandUpdateView.as_view(), name='oscar-band-update'),
    url(r'^band/(?P<pk>\w+)/delete/$', BandDeleteView.as_view(), name='oscar-band-delete'),
]
urlpatterns = [
url(r'^dashboard/', include((dashboard_urlpatterns, 'dashboard', 'dashboard'))),
]
|
sonofatailor/django-oscar
|
src/oscar/test/factories/urls.py
|
Python
|
bsd-3-clause
| 861
|
import ply.lex as lex
tokens = (
'ID',
'QUOTED_STRING',
'NUMBER',
'RATIONAL',
'DECIMAL',
'IF',
'DOT',
'POPEN',
'PCLOSE',
'COMMA',
'PLUS',
'PIPE',
'TIMES',
'NAF',
'ARITH',
)
precedence = (
('right', 'PIPE', 'PLUS'),
('right', 'COMMA', 'TIMES'),
)
t_ID = r'\w+'
t_QUOTED_STRING = r'"[^"]*"|\'[^\']*\''
t_IF = r':-'
t_DOT = r'\.'
t_PIPE = r'\|'
t_PLUS = r'\+'
t_TIMES = r'\*'
t_POPEN = r'\('
t_PCLOSE = r'\)'
t_COMMA = r','
t_ignore_COMMENT = r'%.*'
def t_ARITH(t):
r'\[[^\]]*\]'
t.value = t.value[1:-1]
return t
def t_NAF(t):
r'~|not'
return t
def t_NUMBER(t):
r'\d+'
return t
def t_RATIONAL(t):
r'\#\d+/\d+'
t.value = t.value[1:].split('/')
t.value = "fraction(%s,%s)" % (t.value[0], t.value[1])
return t
def t_DECIMAL(t):
r'\#0(\.\d+)?|\#1'
t.value = t.value[1:]
if t.value in ['0', '1']: t.value = "fraction(%s,1)" % (t.value,)
else: t.value = "fraction(%d,10**%d)" % (int(t.value[2:]), len(t.value[2:]))
return t
def t_newline(t):
r'\n+'
t.lexer.lineno += len(t.value)
t_ignore = ' \t'
def t_error(t):
print("Illegal character '%s'" % (t.value[0],))
t.lexer.skip(1)
lexer = lex.lex()
import ply.yacc as yacc
def p_line(p):
'''line : empty
| rule'''
p[0] = p[1]
def p_empty(p):
'''empty :'''
p[0] = ""
def p_rule(p):
'''rule : head IF body DOT'''
if p[3][1]:
p[0] = "rule(%s, %s) :- %s." % (p[1], p[3][0], ", ".join(p[3][1]))
else:
p[0] = "rule(%s, %s)." % (p[1], p[3][0])
def p_fact(p):
'''rule : head DOT
| head IF DOT'''
p[0] = "rule(%s, 1)." % (p[1],)
def p_constraint(p):
'''rule : IF body DOT'''
if p[2][1]:
p[0] = "rule(0, %s) :- %s." % (p[2][0], ", ".join(p[2][1]))
else:
p[0] = "rule(0, %s)." % (p[2][0],)
def p_head(p):
'''head : atom'''
p[0] = p[1]
def p_head_comp(p):
'''head : gor_head
| gand_head
| lor_head
| land_head'''
p[0] = "%s)" % (p[1],)
def p_gor_head(p):
'''gor_head : atom PIPE atom'''
p[0] = "max(%s, %s" % (p[1], p[3])
def p_gor_head_rec(p):
'''gor_head : gor_head PIPE atom'''
p[0] = "%s,%s" % (p[1], p[3])
def p_gand_head(p):
'''gand_head : atom COMMA atom'''
p[0] = "min(%s, %s" % (p[1], p[3])
def p_gand_head_rec(p):
'''gand_head : gand_head COMMA atom'''
p[0] = "%s,%s" % (p[1], p[3])
def p_lor_head(p):
'''lor_head : atom PLUS atom'''
p[0] = "or(%s, %s" % (p[1], p[3])
def p_lor_head_rec(p):
'''lor_head : lor_head PLUS atom'''
p[0] = "%s,%s" % (p[1], p[3])
def p_land_head(p):
'''land_head : atom TIMES atom'''
p[0] = "and(%s, %s" % (p[1], p[3])
def p_land_head_rec(p):
'''land_head : land_head TIMES atom'''
p[0] = "%s,%s" % (p[1], p[3])
def p_atom_id(p):
'''atom : ID
| ID POPEN terms PCLOSE'''
if len(p) == 2: p[0] = "atom(%s)" % (p[1],)
else: p[0] = "atom(%s(%s))" % (p[1], p[3])
def p_atom_const(p):
'''atom : RATIONAL
| DECIMAL'''
p[0] = p[1]
def p_terms(p):
'''terms : term
| terms COMMA term'''
if len(p) == 2: p[0] = p[1]
else: p[0] = "%s,%s" % (p[1], p[3])
def p_term(p):
'''term : ID
| NUMBER
| QUOTED_STRING'''
p[0] = p[1]
def p_body(p):
'''body : rbody
| body TIMES rbody
| body PLUS rbody
| body COMMA rbody
| body PIPE rbody'''
if len(p) == 2: p[0] = p[1]
elif p[2] == '*': p[0] = ("and(%s,%s)" % (p[1][0], p[3][0]), p[1][1] + p[3][1])
elif p[2] == '+': p[0] = ("or(%s,%s)" % (p[1][0], p[3][0]), p[1][1] + p[3][1])
elif p[2] == '|': p[0] = ("max(%s,%s)" % (p[1][0], p[3][0]), p[1][1] + p[3][1])
elif p[2] == ',': p[0] = ("min(%s,%s)" % (p[1][0], p[3][0]), p[1][1] + p[3][1])
def p_rbody(p):
'''rbody : atom
| NAF rbody
| POPEN body PCLOSE'''
if len(p) == 2: p[0] = (p[1], [p[1]]) if p[1].startswith("atom") else (p[1], [])
elif len(p) == 3: p[0] = ("neg(%s)" % (p[2][0],), [])
else: p[0] = p[2]
def p_rbody_arith(p):
'''rbody : ARITH'''
p[0] = ("1", [p[1]])
def p_error(p):
print("Syntax error in input!")
parser = yacc.yacc()
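# Example (sketch) of the translation performed by this grammar:
#
#   calc > a :- b, c.
#   rule(atom(a), min(atom(b),atom(c))) :- atom(b), atom(c).
#
#   calc > a.
#   rule(atom(a), 1).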
if __name__ == "__main__":
#lexer.input('#1')
#print(lexer.token())
while True:
try:
s = input('calc > ')
except EOFError:
break
if not s: continue
result = parser.parse(s)
print(result)
|
alviano/python
|
fasp/parser.py
|
Python
|
gpl-3.0
| 4,568
|
#!/usr/bin/env python3
from subprocess import run
benchmarks = [
    'rc_linked_queue_baseline',
    'padded_048_rc_linked_queue',
    'padded_064_rc_linked_queue',
    'padded_128_rc_linked_queue',
    'shared_linked_queue_baseline',
    'padded_016_shared_linked_queue',
    'padded_064_shared_linked_queue',
    'padded_128_shared_linked_queue',
]
for name in benchmarks:
    run(['cargo', 'bench', '--', name, '--test', '--nocapture'])
|
Alex-Diez/data-structure-experiments
|
rust-impl/linked-queues.py
|
Python
|
mit
| 744
|
import numpy as np
import trep
import scipy.optimize as so
# set mass, length, and gravity:
m = 1.0; l = 1.0; g = 9.8;
# set state and step conditions:
pk = 0.5 # discrete generalized momentum
qk = 0.2 # theta config
uk = 0.8 # input torque
dt = 0.1 # timestep
# create system
system = trep.System()
# define frames
frames = [
trep.rz('theta_1', name="PendAngle"), [
trep.ty(-l, name="PendMass", mass=m)]]
# add frames to system
system.import_frames(frames)
# add gravity potential
trep.potentials.Gravity(system, (0,-g,0))
# add a torque at the base
trep.forces.ConfigForce(system, 'theta_1', 'tau')
# create and initialize variational integrator
mvi = trep.MidpointVI(system)
mvi.initialize_from_state(0, np.array([qk]), np.array([pk]))
# take single step with VI:
mvi.step(mvi.t1+dt, np.array([uk])) # args are t2, u1
# compare with manual computation results:
def DEL1(qkp1):
    return pk - (qkp1-qk)/dt - g*dt/2.*np.sin((qkp1+qk)/2.0) + uk*dt
# Implicitly solve DEL1 to get new config
qkp1 = so.newton(DEL1, qk)
# get new momentum
pkp1 = (qkp1-qk)/dt - g*dt/2.0*np.sin((qkp1+qk)/2.0)
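# Note: for this simple pendulum (m = l = 1) the midpoint discrete Lagrangian
# is L_d(qk, qk+1) = dt*L((qk+qk+1)/2, (qk+1-qk)/dt), so DEL1 above is the
# forced discrete Euler-Lagrange equation pk + D1 L_d + uk*dt = 0 solved
# implicitly for qk+1, and pkp1 = D2 L_d is the discrete momentum update.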
# print results
print "=============================================="
print "trep VI results:\tanalytical results:"
print "=============================================="
print "qk+1 = ",mvi.q2[0],"\t","qk+1 = ",qkp1
print "pk+1 = ",mvi.p2[0],"\t","pk+1 = ",pkp1
print "=============================================="
|
hilario/trep
|
examples/papers/tase2012/pend-single-step.py
|
Python
|
gpl-3.0
| 1,483
|
#!/usr/bin/env python
#
# Copyright (C) 2007-2008 Camptocamp
#
# This file is part of MapFish Client
#
# MapFish Client is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# MapFish Client is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with MapFish Client. If not, see <http://www.gnu.org/licenses/>.
#
#
# Code taken from the OpenLayers code base
#
# Copyright (c) 2006-2007 MetaCarta, Inc., published under the Clear BSD
# license. See http://svn.openlayers.org/trunk/openlayers/license.txt for the
# full text of the license.
#
#
# Merge multiple JavaScript source code files into one.
#
# Usage:
# This script requires source files to have dependencies specified in them.
#
# Dependencies are specified with a comment of the form:
#
# // @requires <file path>
#
# e.g.
#
# // @requires Geo/DataSource.js
#
# or (ideally) within a class comment definition
#
# /**
# * @class
# *
# * @requires lib/openlayers/OpenLayers/Layer.js
# */
#
# This script should be executed like so:
#
# mergejs.py <output.js> <directory> [...]
#
# e.g.
#
# mergejs.py openlayers.js Geo/ CrossBrowser/
#
# This example will cause the script to walk the `Geo` and
# `CrossBrowser` directories--and subdirectories thereof--and import
# all `*.js` files encountered. The dependency declarations will be extracted
# and then the source code from imported files will be output to
# a file named `openlayers.js` in an order which fulfils the dependencies
# specified.
#
#
# Note: This is a very rough initial version of this code.
#
# -- Copyright 2005-2007 MetaCarta, Inc. / OpenLayers project --
#
# TODO: Allow files to be excluded. e.g. `Crossbrowser/DebugMode.js`?
# TODO: Report error when dependency can not be found rather than KeyError.
import re
import os
import sys
SUFFIX_JAVASCRIPT = ".js"
RE_REQUIRE = "@requires (.*)\n" # TODO: Ensure in comment?
class SourceFile:
"""
Represents a Javascript source code file.
"""
def __init__(self, filepath, source):
"""
"""
self.filepath = filepath
self.source = source
self.requiredBy = []
def _getRequirements(self):
"""
Extracts the dependencies specified in the source code and returns
a list of them.
"""
# TODO: Cache?
return re.findall(RE_REQUIRE, self.source)
requires = property(fget=_getRequirements, doc="")
def usage(filename):
"""
Displays a usage message.
"""
print "%s [-c <config file>] <output.js> <directory> [...]" % filename
class Config:
"""
Represents a parsed configuration file.
A configuration file should be of the following form:
[first]
3rd/prototype.js
core/application.js
core/params.js
    [last]
    core/api.js
    [include]
    [exclude]
    3rd/logger.js
All headings are required.
The files listed in the `first` section will be forced to load
*before* all other files (in the order listed). The files in `last`
section will be forced to load *after* all the other files (in the
order listed).
    If the `include` section is non-empty, only the files listed there (plus
    the `first` files) will be imported. The files listed in the `exclude`
    section will not be imported.
"""
def __init__(self, filename):
"""
Parses the content of the named file and stores the values.
"""
lines = [line.strip() # Assumes end-of-line character is present
for line in open(filename)
if line.strip()] # Skip blank lines
self.forceFirst = lines[lines.index("[first]") + 1:lines.index("[last]")]
self.forceLast = lines[lines.index("[last]") + 1:lines.index("[include]")]
self.include = lines[lines.index("[include]") + 1:lines.index("[exclude]")]
self.exclude = lines[lines.index("[exclude]") + 1:]
def getFiles(configDict, configFile = None):
cfg = None
if configFile:
cfg = Config(configFile)
## Build array of directories
allDirs = []
for k, v in configDict.iteritems():
if not v in allDirs:
allDirs.append(v)
allFiles = []
## Find all the Javascript source files
for sourceDirectory in allDirs:
for root, dirs, files in os.walk(sourceDirectory):
for filename in files:
if filename.endswith(SUFFIX_JAVASCRIPT) and not filename.startswith("."):
filepath = os.path.join(root, filename)[len(sourceDirectory)+1:]
filepath = filepath.replace("\\", "/")
if cfg and cfg.include:
if filepath in cfg.include or filepath in cfg.forceFirst:
allFiles.append(filepath)
elif (not cfg) or (filepath not in cfg.exclude):
allFiles.append(filepath)
files = {}
order = [] # List of filepaths to output, in a dependency satisfying order
## Import file source code
## TODO: Do import when we walk the directories above?
for filepath in allFiles:
print "Importing: %s" % filepath
filekey = filepath.replace("\\", "/").split("/")[0]
fullpath = os.path.join(configDict[filekey], filepath)
content = open(fullpath, "U").read() # TODO: Ensure end of line @ EOF?
files[filepath] = SourceFile(filepath, content) # TODO: Chop path?
print
from toposortmf import toposort
complete = False
resolution_pass = 1
while not complete:
order = [] # List of filepaths to output, in a dependency satisfying order
nodes = []
routes = []
## Resolve the dependencies
print "Resolution pass %s... " % resolution_pass
resolution_pass += 1
for filepath, info in files.items():
nodes.append(filepath)
for neededFilePath in info.requires:
routes.append((neededFilePath, filepath))
for dependencyLevel in toposort(nodes, routes):
for filepath in dependencyLevel:
order.append(filepath)
if not files.has_key(filepath):
print "Importing: %s" % filepath
filekey = filepath.replace("\\", "/").split("/")[0]
fullpath = os.path.join(configDict[filekey], filepath)
content = open(fullpath, "U").read() # TODO: Ensure end of line @ EOF?
files[filepath] = SourceFile(filepath, content) # TODO: Chop path?
# Double check all dependencies have been met
complete = True
try:
for fp in order:
if max([order.index(rfp) for rfp in files[fp].requires] +
[order.index(fp)]) != order.index(fp):
complete = False
except:
complete = False
print
## Move forced first and last files to the required position
if cfg:
print "Re-ordering files..."
order = cfg.forceFirst + [item
for item in order
if ((item not in cfg.forceFirst) and
(item not in cfg.forceLast))] + cfg.forceLast
return (files, order)
def run (files, order, outputFilename = None):
## Output the files in the determined order
result = []
## Header inserted at the start of each file in the output
HEADER = "/* " + "=" * 70 + "\n %s\n" + " " + "=" * 70 + " */\n\n"
for fp in order:
f = files[fp]
print "Exporting: ", f.filepath
result.append(HEADER % f.filepath)
source = f.source
result.append(source)
if not source.endswith("\n"):
result.append("\n")
print "\nTotal files merged: %d " % len(files)
if outputFilename:
print "\nGenerating: %s" % (outputFilename)
open(outputFilename, "w").write("".join(result))
return "".join(result)
if __name__ == "__main__":
import getopt
options, args = getopt.getopt(sys.argv[1:], "-c:")
try:
outputFilename = args[0]
except IndexError:
usage(sys.argv[0])
raise SystemExit
else:
sourceDirectory = args[1]
if not sourceDirectory:
usage(sys.argv[0])
raise SystemExit
configDict = { 'OpenLayers': sourceDirectory }
configFile = None
    if options and options[0][0] == "-c":
        configFile = options[0][1]
        print "Parsing configuration file: %s" % configFile
    (files, order) = getFiles(configDict, configFile)
    run(files, order, outputFilename)
|
flavour/ssf
|
static/scripts/tools/mergejsmf.py
|
Python
|
mit
| 8,962
|
import os
import re
source_dir = "src/main/res/"
target_dir = "../fastlane/metadata/android/"
def copy_key_from_strings_xml_to_file(xml, key, filename):
match = re.search("<string name=\"" + key + "\">\"?(.*?)\"?</string>", xml, re.DOTALL)
if match:
with open(filename, "w", encoding='utf8') as file:
file.write(match.group(1))
def get_locale_from(dirname):
if not dirname.startswith("values"):
return None
components = dirname.split("-")
if len(components) == 1:
return "en"
elif re.search('[0-9]',components[1]):
return None
elif len(components) == 2:
return components[1]
elif len(components) == 3:
return components[1] + "-" + components[2][1:]
return None
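# Sketch of the mapping implemented by get_locale_from:
#
#   "values"        -> "en"
#   "values-de"     -> "de"
#   "values-pt-rBR" -> "pt-BR"
#   "values-v21"    -> None  (density/version qualifiers are skipped)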
for dirname in sorted(os.listdir(source_dir)):
locale = get_locale_from(dirname)
if not locale:
continue
stringsfile = source_dir + dirname + "/strings.xml"
if not os.path.exists(stringsfile):
        continue
print(locale)
locale_dir = target_dir + locale
if not os.path.exists(locale_dir):
os.makedirs(locale_dir)
with open(stringsfile, 'r', encoding='utf8') as file:
xml = file.read()
copy_key_from_strings_xml_to_file(xml, "store_listing_short_description", locale_dir + "/short_description.txt")
copy_key_from_strings_xml_to_file(xml, "store_listing_full_description", locale_dir + "/full_description.txt")
|
Binnette/StreetComplete
|
app/copyShopDescriptions.py
|
Python
|
gpl-3.0
| 1,324
|
"""
.. currentmodule:: nidaqmx
The :mod:`nidaqmx` package provides the following classes:
.. autosummary::
AnalogInputTask
AnalogOutputTask
DigitalInputTask
DigitalOutputTask
CounterInputTask
CounterOutputTask
that expose NI-DAQmx tasks to Python environment. The instances of
these task classes provide methods to create channels, to set timing
and triggering properties, as well as to read or write data.
Example usage
=============
The following example demonstrates how to create an analog output
task that generates voltage to given channel of the NI card::
>>> from nidaqmx import AnalogOutputTask
>>> import numpy as np
>>> data = 9.95*np.sin(np.arange(1000, dtype=np.float64)*2*np.pi/1000)
>>> task = AnalogOutputTask()
>>> task.create_voltage_channel('Dev1/ao2', min_val=-10.0, max_val=10.0)
>>> task.configure_timing_sample_clock(rate=1000.0)
>>> task.write(data, auto_start=False)
>>> task.start()
>>> raw_input('Generating voltage continuously. Press Enter to interrupt..')
>>> task.stop()
>>> del task
The generated voltage can be measured as well when connecting the corresponding
channels in the NI card::
>>> from nidaqmx import AnalogInputTask
>>> import numpy as np
>>> task = AnalogInputTask()
>>> task.create_voltage_channel('Dev1/ai16', terminal = 'rse', min_val=-10.0, max_val=10.0)
>>> task.configure_timing_sample_clock(rate=1000.0)
>>> task.start()
>>> data = task.read(2000, fill_mode='group_by_channel')
>>> del task
>>> from pylab import plot, show
>>> plot (data)
>>> show ()
that should plot two sine waves.
Learning about your NI card and software
========================================
The nidaqmx package allows you to make various queries about the NI
card devices as well as software properties. For that, use
`nidaqmx.System` instance as follows::
>>> from nidaqmx import System
>>> system = System()
>>> print 'libnidaqmx version:',system.version
libnidaqmx version: 8.0
>>> print 'NI-DAQ devices:',system.devices
NI-DAQ devices: ['Dev1', 'Dev2']
>>> dev1 = system.devices[0]
>>> print dev1.get_product_type()
PCIe-6259
>>> print dev1.get_bus()
PCIe (bus=7, device=0)
>>> print dev1.get_analog_input_channels()
['Dev1/ai0', 'Dev1/ai1', ..., 'Dev1/ai31']
Note that ``system.devices`` contains instances of
`nidaqmx.Device`.
Module content
==============
"""
from .libnidaqmx import AnalogInputTask, AnalogOutputTask,\
DigitalInputTask, DigitalOutputTask, CounterInputTask,\
CounterOutputTask, Device, System, get_nidaqmx_version
|
burnpanck/pylibnidaqmx
|
nidaqmx/__init__.py
|
Python
|
bsd-3-clause
| 2,530
|
'''
chg1: first change to multi-person pose estimation
'''
from __future__ import print_function, absolute_import
import argparse
import time
import matplotlib.pyplot as plt
import os
import torch
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim
import torchvision.transforms as transforms
from pose import Bar
from pose.utils.logger import Logger
from pose.utils.evaluation import accuracy, AverageMeter, final_preds
from pose.utils.misc import save_checkpoint, save_pred, LRDecay
from pose.utils.osutils import mkdir_p, isfile, isdir, join
from pose.utils.imutils import batch_with_heatmap
from pose.utils.transforms import fliplr, flip_back
import pose.models as models
import pose.datasets as datasets
model_names = sorted(name for name in models.__dict__
if name.islower() and not name.startswith("__")
and callable(models.__dict__[name]))
# joint indices used to calculate accuracy
idx = [1,2,3,4,5,6,11,12,15,16]
best_acc = 0
def main(args):
global best_acc
# create checkpoint dir
if not isdir(args.checkpoint):
mkdir_p(args.checkpoint)
# create model
print("==> creating model '{}'".format(args.arch))
model = models.__dict__[args.arch](num_classes=16)
# multi-GPU
model = torch.nn.DataParallel(model).cuda()
# the total number of parameters
    print('    Total params: %.2fM' % (sum(para.numel() for para in model.parameters())/1000000.0))
# define criterion and optimizer
criterion = torch.nn.MSELoss(size_average=True).cuda()
optimizer = torch.optim.RMSprop(model.parameters(),
lr = args.lr,
momentum = args.momentum,
weight_decay = args.weight_decay)
# optionally resume from a checkpoint
# --------
title = 'mpii-' + args.arch
if args.resume:
if isfile(args.resume):
print("=> loading checkpoint '{}'".format(args.resume))
checkpoint = torch.load(args.resume)
args.start_epoch = checkpoint['epoch']
best_acc = checkpoint['best_acc']
model.load_state_dict(checkpoint['state_dict'])
optimizer.load_state_dict(checkpoint['optimizer'])
print("=> loaded checkpoint '{}' (epoch {})"
.format(args.resume, checkpoint['epoch']))
logger = Logger(join(args.checkpoint, 'log.txt'), title=title, resume=True)
else:
print("=> no checkpoint found at '{}'".format(args.resume))
# --------
else:
# open the log file
logger = Logger(os.path.join(args.checkpoint, 'log.txt'), title=title)
# set names of log file
logger.set_names(['train-loss', 'val-loss', 'val-acc'])
# using the fastest algorithm
cudnn.benchmark = True
# Data loading code
train_loader = torch.utils.data.DataLoader(
dataset = datasets.Mpii('data/mpii/mpii_annotations.json', args.dataPath),
batch_size = args.train_batch,
shuffle = True,
num_workers = args.workers,
pin_memory=True)
val_loader = torch.utils.data.DataLoader(
dataset = datasets.Mpii('data/mpii/mpii_annotations.json', args.dataPath, train=False),
batch_size = args.test_batch,
shuffle = False,
num_workers = args.workers,
pin_memory=True)
if args.evaluate:
print('\nEvaluation only')
loss, acc, predictions = validate(val_loader, model, criterion, args.debug, args.flip)
save_pred(predictions, checkpoint=args.checkpoint)
return
for epoch in range(args.start_epoch, args.Epochs):
# lr decay
lr = LRDecay(optimizer, epoch, args.lr)
print('\nEpoch: %d | lr: %.8f' % (epoch, lr))
# train for one epoch
train_loss = train(train_loader, model, criterion, optimizer, epoch - 1, args.debug)
# evaluate on validation set
valid_loss, valid_acc, predictions = validate(val_loader, model, criterion, args.debug, args.flip)
# append logger file
logger.append([train_loss, valid_loss, valid_acc])
# remember best acc and save checkpoint
is_best = valid_acc > best_acc
best_acc = max(valid_acc, best_acc)
save_checkpoint({
'epoch': epoch,
'arch': args.arch,
'state_dict': model.state_dict(),
'best_acc': best_acc,
'optimizer' : optimizer.state_dict(),
}, predictions, is_best, checkpoint = args.checkpoint)
logger.close()
logger.plot()
plt.savefig(os.path.join(args.checkpoint, 'log.eps'))
def train(train_loader, model, criterion, optimizer, epoch, debug=False):
batch_time = AverageMeter()
data_time = AverageMeter()
losses = AverageMeter()
# switch to train mode
model.train()
end = time.time()
gt_win, pred_win = None, None
bar = Bar('Processing', max=len(train_loader))
print("the length of train_loader: {}".format(len(train_loader)))
for i, (inputs, target) in enumerate(train_loader):
# measure data loading time
data_time.update(time.time() - end)
inputs = inputs.cuda()
target = target.cuda(async=True)
input_var = torch.autograd.Variable(inputs)
target_var = torch.autograd.Variable(target)
# compute output
output = model(input_var)
# Calculate intermediate loss
loss = criterion(output[0], target_var)
for j in range(1, len(output)):
loss += criterion(output[j], target_var)
if debug: # visualize groundtruth and predictions
gt_batch_img = batch_with_heatmap(inputs, target)
pred_batch_img = batch_with_heatmap(inputs, output[-1].data)
if not gt_win or not pred_win:
ax1 = plt.subplot(121)
ax1.title.set_text('Groundtruth')
gt_win = plt.imshow(gt_batch_img)
ax2 = plt.subplot(122)
ax2.title.set_text('Prediction')
pred_win = plt.imshow(pred_batch_img)
else:
gt_win.set_data(gt_batch_img)
pred_win.set_data(pred_batch_img)
plt.pause(.05)
plt.draw()
# measure accuracy and record loss
losses.update(loss.data[0], inputs.size(0))
# compute gradient and do SGD step
optimizer.zero_grad()
loss.backward()
optimizer.step()
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
# plot progress
bar.suffix = '({batch}/{size}) Data: {data:.6f}s | Batch: {bt:.3f}s | Total: {total:} | ETA: {eta:} | Loss: {loss:.8f}'.format(
batch=i + 1,
size=len(train_loader),
data=data_time.val,
bt=batch_time.val,
total=bar.elapsed_td,
eta=bar.eta_td,
loss=losses.avg,
)
bar.next()
bar.finish()
return losses.avg
def validate(val_loader, model, criterion, debug=False, flip=True):
batch_time = AverageMeter()
losses = AverageMeter()
acces = AverageMeter()
# predictions
predictions = torch.Tensor(val_loader.dataset.__len__(), 16, 2)
# switch to evaluate mode
model.eval()
gt_win, pred_win = None, None
end = time.time()
bar = Bar('Processing', max=len(val_loader))
print("length of output:{}".format(len(val_loader)))
for i, (inputs, target, meta) in enumerate(val_loader):
target = target.cuda(async=True)
input_var = torch.autograd.Variable(inputs.cuda(), volatile=True)
target_var = torch.autograd.Variable(target, volatile=True)
# compute output
output = model(input_var)
# score_map: 16*64*64
score_map = output[-1].data.cpu()
if flip:
flip_input_var = torch.autograd.Variable(
torch.from_numpy(fliplr(inputs.clone().numpy())).float().cuda(),
volatile=True
)
flip_output_var = model(flip_input_var)
flip_output = flip_back(flip_output_var[-1].data.cpu())
score_map += flip_output
#print("scor")
loss = 0
for o in output:
loss += criterion(o, target_var)
# target : 16*64*64
acc = accuracy(score_map.cuda(), target, idx)
# generate predictions
preds = final_preds(score_map, meta['center'], meta['scale'], [64, 64])
for n in range(score_map.size(0)):
predictions[meta['index'][n], :, :] = preds[n, :, :]
if debug:
gt_batch_img = batch_with_heatmap(inputs, target)
pred_batch_img = batch_with_heatmap(inputs, score_map)
if not gt_win or not pred_win:
plt.subplot(121)
gt_win = plt.imshow(gt_batch_img)
plt.subplot(122)
pred_win = plt.imshow(pred_batch_img)
else:
gt_win.set_data(gt_batch_img)
pred_win.set_data(pred_batch_img)
plt.pause(.05)
plt.draw()
# measure accuracy and record loss
losses.update(loss.data[0], inputs.size(0))
acces.update(acc[0], inputs.size(0))
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
# plot progress
bar.suffix = '({batch}/{size}) Batch: {bt:.3f}s | Total: {total:} | ETA: {eta:} | Loss: {loss:.4f} | Acc: {acc: .9f}'.format(
batch=i + 1,
size=len(val_loader),
bt=batch_time.avg,
total=bar.elapsed_td,
eta=bar.eta_td,
loss=losses.avg,
acc=acces.avg
)
bar.next()
bar.finish()
return losses.avg, acces.avg, predictions
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='hg_pytorch training')
## General options
parser.add_argument('-dataPath', default = '/home/guoqiang/hg_train/data/mpii/images/',
help = 'the path to images data')
## Model options
    parser.add_argument('-arch', default = 'hg4', metavar = 'ARCH', choices = model_names,
                        help = 'model architecture: '+' | '.join(model_names)+' (default: hg4)')
    parser.add_argument('-j', '--workers', default = 1, type = int, metavar = 'N',
                        help = 'number of data loading workers (default: 1)')
    parser.add_argument('--Epochs', default = 50, type = int, metavar='EPOCH',
                        help = 'number of total epochs to run')
parser.add_argument('--start-epoch', default = 1, type = int,
help = 'manual epoch number (useful for continue)')
parser.add_argument('--train-batch', default = 6, type = int,
help = 'train batchsize')
parser.add_argument('--test-batch', default = 6, type = int,
help = 'test batchsize')
parser.add_argument('--lr', default = 2.5e-4, type = float,
help = 'initial learning rate')
parser.add_argument('--momentum', default = 0, type = float,
help = 'momentum')
    parser.add_argument('--weight-decay', '--wd', default = 0, type = float,
                        help = 'weight decay (default: 0)')
parser.add_argument('--print-freq', '-p', default = 10, type = int,
help = 'print frequency (default: 10)')
parser.add_argument('-c', '--checkpoint', default = 'checkpoint', type = str, metavar='PATH',
help = 'path to save checkpoint (default: checkpoint)')
parser.add_argument('--resume', default = '', type = str, metavar='PATH',
help = 'path to latest checkpoint (default: none)')
parser.add_argument('-e', '--evaluate', dest = 'evaluate', action = 'store_true',
help = 'evaluate model on validation set')
parser.add_argument('-d', '--debug', dest = 'debug', action = 'store_true',
help = 'show intermediate results')
parser.add_argument('-f', '--flip', dest = 'flip', action = 'store_true',
help = 'flip the input during validation')
main(parser.parse_args())
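# Example invocations (sketch; paths and checkpoint names are illustrative):
#
#   python example/main.py -arch hg4 -dataPath /path/to/mpii/images/ \
#       --checkpoint checkpoint/mpii-hg4 --train-batch 6
#
#   python example/main.py --resume checkpoint/mpii-hg4/checkpoint.pth.tar -e --flip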
|
weigq/pytorch-pose
|
example/main.py
|
Python
|
gpl-3.0
| 12,764
|
import bpy
import struct
import sqlite3
import os
import shutil
import tarfile
import random
import string
import hashlib
from mathutils import Vector
dbScriptPath = '/home/pacmancoder/core/projects/dcdr/dcdr-scene/scenedb.sql'
tmpDirectory = '/home/pacmancoder/core/projects/dcdr/dcdr-scene/scenes_tmp/'
outputDirectory = '/home/pacmancoder/core/projects/dcdr/dcdr-scene/scenes/'
dbName = 'scene.db'
dbPath = tmpDirectory + dbName
if os.path.isdir(tmpDirectory):
shutil.rmtree(tmpDirectory, ignore_errors=True)
os.mkdir(tmpDirectory)
if os.path.isfile(dbPath):
os.remove(dbPath)
print('[INFO] Export started...\n\n')
dbScriptFile = open(dbScriptPath, 'r')
dbScript = dbScriptFile.read()
dbConnection = sqlite3.connect(dbPath)
dbCursor = dbConnection.cursor()
dbCursor.executescript(dbScript)
print('[>>>>] Reading cameras...')
for camera in bpy.data.cameras:
obj = bpy.data.objects[camera.name]
cameraPos = struct.pack('fff', obj.location[0], obj.location[1], obj.location[2])
cameraUpData = obj.matrix_world.to_quaternion() * Vector((0.0, 1.0, 0.0))
cameraDirectionData = obj.matrix_world.to_quaternion() * Vector((0.0, 0.0, -1.0))
cameraUp = struct.pack('fff', cameraUpData[0], cameraUpData[1], cameraUpData[2])
cameraDirection = struct.pack('fff', cameraDirectionData[0], cameraDirectionData[1], cameraDirectionData[2])
scene = bpy.data.scenes[0]
aspectRatio = scene.render.resolution_x / scene.render.resolution_y
cameraAngle = camera.angle / aspectRatio
cameraDofDistance = camera.dof_distance
cameraDofRadius = camera.get('dofRadius', 0.0)
dbCursor.execute('INSERT INTO Camera(pos, up, direction, fov, dofDistance, dofRadius) VALUES(?, ?, ?, ?, ?, ?)',
[cameraPos, cameraUp, cameraDirection, cameraAngle, cameraDofDistance, cameraDofRadius])
print('[<<<<]', len(bpy.data.cameras), 'cameras written to the scene\n\n')
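# The BLOBs packed above are native-endian float triples; a consumer of the
# scene database can decode them with the matching format, e.g. (sketch):
#
#   x, y, z = struct.unpack('fff', cameraPos)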
textures = {}
print('[>>>>] Exporting bitmap textures...')
for img in bpy.data.images:
if not img.users or (img.users == 1 and img.use_fake_user):
bpy.data.images.remove(img)
imageCounter = 0
bpy.context.scene.render.image_settings.file_format = 'PNG'
for texture in bpy.data.textures:
if texture.type == 'IMAGE' and texture.image is not None:
assetName = 'tex_' + str(imageCounter) + '.png'
texture.image.save_render(filepath = tmpDirectory + assetName)
dbCursor.execute('INSERT INTO Texture(type) VALUES(?)', ['BITMAP'])
textures[texture.name] = dbCursor.lastrowid
dbCursor.execute('INSERT INTO BitmapTexture(id, format, path, channels, filter) VALUES(?, ?, ?, ?, ?)',
[dbCursor.lastrowid, 'PNG', assetName, 'RGB', 'LINEAR'])
else:
print('[WARNING] Texture', texture.name, 'with id', imageCounter, 'has no assigned file')
dbCursor.execute('INSERT INTO Texture(type) VALUES(?)', ['NONE'])
textures[texture.name] = dbCursor.lastrowid
imageCounter += 1
print('[<<<<]', len(bpy.data.textures), 'textures written to the scene\n\n')
print('[>>>>] Exporting materials...')
materials = {}
for material in bpy.data.materials:
if material.type == 'SURFACE':
diffuseMap = None
specularMap = None
for textureSlot in material.texture_slots:
if textureSlot is not None:
if textureSlot.use_map_color_diffuse:
diffuseMap = textures[textureSlot.name]
elif textureSlot.use_map_specular:
specularMap = textures[textureSlot.name]
if diffuseMap is None:
dbCursor.execute('INSERT INTO Texture(type) VALUES(?)', ['COLOR'])
diffuseMap = dbCursor.lastrowid
dbCursor.execute('INSERT INTO ColorTexture(id, r, g, b) VALUES(?, ?, ?, ?)',
[diffuseMap, material.diffuse_color.r, material.diffuse_color.g, material.diffuse_color.b])
if specularMap is None:
dbCursor.execute('INSERT INTO Texture(type) VALUES(?)', ['COLOR'])
specularMap = dbCursor.lastrowid
dbCursor.execute('INSERT INTO ColorTexture(id, r, g, b) VALUES(?, ?, ?, ?)',
[specularMap, material.specular_color.r, material.specular_color.g, material.specular_color.b])
kDiffuse = material.diffuse_intensity
kSpecular = material.specular_intensity
kReflectance = material.raytrace_mirror.reflect_factor
kGlossiness = material.raytrace_mirror.gloss_factor
kTransmittance = max(0, min(1.0 - material.alpha, 1))
kIOR = material.raytrace_transparency.ior
kEmittance = material.emit
materialTuple = [diffuseMap, specularMap, kDiffuse, kSpecular, kReflectance, kGlossiness,kTransmittance, kIOR, kEmittance]
dbCursor.execute(
'INSERT INTO Material(diffuseTexId, specularTexId, kDiffuse, kSpecular, kReflectance, kGlossiness, kTransmittance, kIOR, kEmittance) '
'VALUES(?, ?, ?, ?, ?, ?, ?, ?, ?)', materialTuple)
materials[material.name] = dbCursor.lastrowid
    else:
        print("[ERROR]: ", material.type, 'material type is not supported')
        raise RuntimeError('unsupported material type: %s' % material.type)
print('[<<<<]', len(bpy.data.materials), 'materials written to the scene\n\n')
print('[>>>>] Exporting meshes...')
meshCount = 0
for object in bpy.data.objects:
if object.type == 'MESH':
print('[>>>>] Exporting ', object.name ,'...')
meshCount += 1
objUV = None
if len(object.data.uv_layers) > 0:
objUV = object.data.uv_layers[0]
objType = object.get('mesh_type', 'mesh')
if objType == 'sphere':
dbCursor.execute("INSERT INTO Geometry(type) VALUES(?)", ['SPHERE'])
dbCursor.execute("INSERT INTO SphereGeometry(id, radius) VALUES(?, ?)",
[dbCursor.lastrowid, object.delta_scale[0]])
elif objType == 'plane':
dbCursor.execute("INSERT INTO Geometry(type) VALUES(?)", ['PLANE'])
dbCursor.execute("INSERT INTO PlaneGeometry(id, width, height) VALUES(?, ?, ?)",
[dbCursor.lastrowid, object.delta_scale[0], object.delta_scale[1]])
else:
vertexPositionBuffer = bytes()
vertexNormalBuffer = bytes()
vertexUvBuffer = bytes()
for face in object.data.polygons:
faceVertexPosBuffer = bytes()
faceVertexNormalBuffer = bytes()
faceVertexUVBuffer = bytes()
for vert, loop in zip(face.vertices, face.loop_indices):
vert = object.data.vertices[vert]
faceVertexPosBuffer += struct.pack('fff', vert.co[0], vert.co[1], vert.co[2])
faceVertexNormalBuffer += struct.pack('fff', vert.normal[0], vert.normal[1], vert.normal[2])
if objUV is not None:
uv = objUV.data[loop].uv
faceVertexUVBuffer += struct.pack('ff', uv[0], uv[1])
else:
faceVertexUVBuffer += struct.pack('ff', 0.0, 0.0)
vertexPositionBuffer += faceVertexPosBuffer
vertexNormalBuffer += faceVertexNormalBuffer
vertexUvBuffer += faceVertexUVBuffer
dbCursor.execute("INSERT INTO Geometry(type) VALUES(?)", ['MESH'])
dbCursor.execute("INSERT INTO MeshGeometry(id, points, normals, uvs) VALUES(?, ?, ?, ?)",
[dbCursor.lastrowid, vertexPositionBuffer, vertexNormalBuffer, vertexUvBuffer])
geometryId = dbCursor.lastrowid
if object.active_material is not None:
materialId = materials[object.active_material.name]
else:
materialId = None
dbCursor.execute("INSERT INTO SceneObject(name, geometryId, materialId, pos,"
"rotation, scale, visible) VALUES(?, ?, ?, ?, ?, ?, ?)",
[object.name, geometryId, materialId,
struct.pack('fff', object.location[0], object.location[1], object.location[2]),
struct.pack('fff', object.rotation_euler[0], object.rotation_euler[1], object.rotation_euler[2]),
struct.pack('fff', object.scale[0], object.scale[1], object.scale[2]),
                      1])
print('[<<<<]', meshCount, 'meshes written to the scene\n\n')
print('[>>>>] Exporting scene metadata...')
# Scene Metainfo
#uniqueId = ''.join([random.choice(string.ascii_lowercase + string.digits) for n in range(32)])
uniqueId = 'scene'
scene = bpy.data.scenes[0]
dbCursor.execute("INSERT INTO Metainfo(uid, name, renderWidth, renderHeight) VALUES(?, ?, ?, ?)",
[uniqueId, scene.name, scene.render.resolution_x, scene.render.resolution_y])
print('[<<<<] Metadata export completed\n\n')
dbConnection.commit()
dbConnection.close()
print('[>>>>] Creating tar archive...')
scenePath = outputDirectory + uniqueId + '.tar'
tar = tarfile.open(scenePath, 'w')
tar.add(tmpDirectory, '')
tar.close()
print('[<<<<] Tar has been generated\n\n')
print('[>>>>] Rebuilding index...')
# TODO
print('[<<<<] Index has been rebuilt\n\n')
print('[SUCCESS] Scene export finished (' + uniqueId + '.tar)')
|
pacmancoder/dcdr
|
dcdr-scene/blender_export_scene.py
|
Python
|
mit
| 9,613
|
# Copyright (C) 2008 Andi Albrecht, albrecht.andi@gmail.com
#
# This module is part of python-sqlparse and is released under
# the BSD License: http://www.opensource.org/licenses/bsd-license.php.
"""Parse SQL statements."""
__version__ = '0.1.14'
# Setup namespace
from sqlparse import engine
from sqlparse import filters
from sqlparse import formatter
from sqlparse import lexer
from sqlparse import tokens as T
from sqlparse.engine import grouping
from sqlparse.parsers import SQLParser
# Deprecated in 0.1.5. Will be removed in 0.2.0
from sqlparse.exceptions import SQLParseError
def build_parsers():
parsers = dict()
for cls in SQLParser.__subclasses__():
parsers[cls.dialect] = cls()
return parsers
_parsers = build_parsers()
def parse(sql, encoding=None, dialect=None):
"""Parse sql and return a list of statements.
    :param sql: A string containing one or more SQL statements.
:param encoding: The encoding of the statement (optional).
:param dialect: The sql engine dialect of the input sql statements.
      It only supports "mysql" right now. If dialect is not specified,
      the input sql will be parsed using the generic sql syntax. (optional)
:returns: A tuple of :class:`~sqlparse.sql.Statement` instances.
"""
stream = parsestream(sql, encoding, dialect)
return tuple(stream)
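# Example (sketch, assuming sqlparse.parsers registers its generic parser
# under the dialect key None):
#
#   stmts = parse('select * from foo; select 1;')
#   for stmt in stmts:
#       print unicode(stmt)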
def parsestream(stream, encoding=None, dialect=None):
"""Parses sql statements from file-like object.
:param stream: A file-like object.
:param encoding: The encoding of the stream contents (optional).
:param dialect: The sql engine dialect of the input sql statements.
It only supports "mysql" right now. (optional)
:returns: A generator of :class:`~sqlparse.sql.Statement` instances.
"""
parser = _parsers.get(dialect)
if parser is None:
raise Exception("Unable to find parser to parse dialect ({0})."
.format(dialect))
return parser.parse(stream, encoding)
def format(sql, **options):
"""Format *sql* according to *options*.
Available options are documented in :ref:`formatting`.
In addition to the formatting options this function accepts the
keyword "encoding" which determines the encoding of the statement.
:returns: The formatted SQL statement as string.
"""
options = formatter.validate_options(options)
encoding = options.pop('encoding', None)
stream = lexer.tokenize(sql, encoding)
stream = _format_pre_process(stream, options)
stack = engine.FilterStack()
stack = formatter.build_filter_stack(stack, options)
stack.postprocess.append(filters.SerializerUnicode())
statements = split2(stream)
return ''.join(stack.run(statement) for statement in statements)
def _format_pre_process(stream, options):
pre_processes = []
if options.get('keyword_case', None):
pre_processes.append(
filters.KeywordCaseFilter(options['keyword_case']))
if options.get('identifier_case', None):
pre_processes.append(
filters.IdentifierCaseFilter(options['identifier_case']))
if options.get('truncate_strings', None) is not None:
pre_processes.append(filters.TruncateStringFilter(
width=options['truncate_strings'], char=options['truncate_char']))
return _pre_process(stream, pre_processes)
def _pre_process(stream, pre_processes):
if pre_processes:
for pre_process in pre_processes:
stream = pre_process.process(None, stream)
return stream
def split(sql, encoding=None):
"""Split *sql* into single statements.
    :param sql: A string containing one or more SQL statements.
:param encoding: The encoding of the statement (optional).
:returns: A list of strings.
"""
stream = lexer.tokenize(sql, encoding)
splitter = StatementFilter()
stream = splitter.process(None, stream)
return [unicode(stmt).strip() for stmt in stream]
from sqlparse.engine.filter import StatementFilter
def split2(stream):
splitter = StatementFilter()
return list(splitter.process(None, stream))
|
Yelp/sqlparse
|
sqlparse/__init__.py
|
Python
|
bsd-3-clause
| 4,111
|
import click
from groundcontrol.scheduler import Scheduler
from groundcontrol.server import app
from groundcontrol.deployment import Deployment
from groundcontrol.models import set_aws_host
from groundcontrol.models import set_aws_region
from groundcontrol.instrument.reporter import MetricReporter
from waitress import serve
import logging
import logging.config
import yaml
@click.group()
def cli():
pass
@cli.command()
@click.option('--debug', help='Run the server in debug mode', default=False, is_flag=True)
@click.option('--aws-region', help='aws region', envvar='AWS_DEFAULT_REGION', default='eu-west-1')
@click.option('--aws-alarms', help='Create CloudWatch Alarms', envvar='CW_CREATE_ALARMS', default=False)
@click.option('--aws-cw-topic', help='SNS topic ARN for CloudWatch Alarm', envvar='CW_SNS_TOPIC', default=None)
@click.option('--aws-cw-metric', help='CloudWatch Metric Name', envvar='CW_METRIC_NAME', default=None)
@click.option('--aws-cw-namespace', help='CloudWatch Metric Namespace', envvar='CW_NAMESPACE', default=None)
@click.option('--dynamodb-host', help='dynamodb host', envvar='DYNAMODB_HOST', default=None)
@click.option('-v', '--verbosity', help='verbosity', default='CRITICAL')
@click.option('--log-config', envvar='GC_LOG_CONFIG', help='python yaml config file', default=None)
@click.option('--flyby-endpoint', envvar='GC_FLYBY_ENDPOINT', help='Fully qualified FlyBy endpoint', default=None)
@click.option('-e', '--environment',
envvar='GC_ENVIRONMENT',
help='development or production',
type=click.Choice(
['development', 'production']),
default='development')
def server(debug, aws_region, aws_alarms, aws_cw_topic, aws_cw_metric, aws_cw_namespace, dynamodb_host, verbosity,
log_config, flyby_endpoint, environment):
logging.getLogger().setLevel(level=getattr(logging, verbosity))
if log_config:
with open(log_config, 'r') as conf:
            logging.config.dictConfig(yaml.safe_load(conf))
set_aws_host(dynamodb_host)
set_aws_region(aws_region)
Deployment.create_tables()
scheduler = Scheduler(region_name=aws_region, flyby_endpoint=flyby_endpoint)
MetricReporter(reporting_interval=10).start()
app.config['AWS_REGION'] = aws_region
if aws_alarms and aws_cw_metric and aws_cw_topic and aws_cw_namespace:
app.config['CW_CREATE_ALARMS'] = aws_alarms
app.config['CW_SNS_TOPIC'] = aws_cw_topic
app.config['CW_METRIC_NAME'] = aws_cw_metric
app.config['CW_NAMESPACE'] = aws_cw_namespace
try:
scheduler.start()
if environment == "development":
app.run(host='0.0.0.0', debug=debug)
else:
serve(app, listen='*:5000')
except (KeyboardInterrupt, SystemExit):
scheduler.stop()
raise
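# Example (sketch, assuming the CLI group is exposed as a console script named
# "groundcontrol"):
#
#   groundcontrol server --aws-region eu-west-1 -v INFO -e development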
|
Skyscanner/ground-control
|
groundcontrol/cli.py
|
Python
|
apache-2.0
| 2,839
|
# This file is part of Lod4Stat.
#
# Copyright (C) 2014 Provincia autonoma di Trento
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Django models for l4s project.
"""
from django.db import models
from django.core.mail import send_mail
from django.utils import timezone
from django.utils.http import urlquote
from django.utils.translation import ugettext_lazy as _
from django.contrib.auth.models import AbstractBaseUser, PermissionsMixin
from django.contrib.auth.models import BaseUserManager
from datetime import datetime
from django.conf import settings
from django.contrib.sites.models import Site
class CustomSite(Site):
in_manutenzione = models.BooleanField()
label = models.CharField(max_length=255)
class Meta:
verbose_name = 'Sito in manutenzione'
verbose_name_plural = 'Sito in manutenzione'
class Test3(models.Model):
"""
Test with 3 columns.
"""
id1 = models.IntegerField()
id2 = models.IntegerField()
numerosity = models.IntegerField(_('numerosity'))
class Test4(models.Model):
"""
Test with 4 columns.
"""
id1 = models.IntegerField()
id2 = models.IntegerField()
id3 = models.IntegerField()
numerosity = models.IntegerField(_('numerosity'))
class Test5(models.Model):
"""
Test with 5 columns.
"""
id1 = models.IntegerField()
id2 = models.IntegerField()
id3 = models.IntegerField()
id4 = models.IntegerField()
numerosity = models.IntegerField(_('numerosity'))
class TerritorialLevel(models.Model):
"""
Territorial Level model.
"""
name = models.CharField(max_length=255)
class OntologyFileModel(models.Model):
"""
Ontology file.
"""
name = models.CharField(max_length=255)
upload = models.FileField(upload_to='ontologies')
def delete(self, *args, **kwargs):
"""
Delete ontology file and its model.
:param args:
:param kwargs:
"""
self.upload.delete()
super(OntologyFileModel, self).delete(*args, **kwargs)
    def __unicode__(self):
"""
In unicode format.
:return: Name in unicode.
"""
return u'%s' % self.name
class Metadata(models.Model):
"""
A Metadata used to add <key, value> to main db table and columns.
"""
table_name = models.CharField(_('table name'), max_length=30, blank=False)
column_name = models.CharField(_('column name'), max_length=30, null=True)
key = models.CharField(_('key'), max_length=256, blank=False)
value = models.CharField(_('value'), max_length=256, blank=False)
class External_Metadata(models.Model):
"""
    A Metadata used to add <key, value> to external db tables and columns.
"""
table_name = models.CharField(_('table name'), max_length=128, blank=False)
column_name = models.CharField(_('column name'), max_length=128, null=True)
    id_value = models.CharField(_('id value'), max_length=10, null=True)
key = models.CharField(_('key'), max_length=128, blank=False)
value = models.CharField(_('value'), max_length=1500, blank=False)
class Concept(models.Model):
"""
    Maps a metadata key to a descriptive concept.
"""
key = models.CharField(_('key'), max_length=128, blank=False)
concept = models.CharField(_('concept'), max_length=500, null=True)
class ExecutedQueryLog(models.Model):
"""
Metadata to log queries.
"""
query_title = models.CharField(_('Title'), max_length=255)
query_body = models.CharField(_('Body'), max_length=20000)
executed_by = models.IntegerField()
executed_at = models.DateTimeField(auto_now_add=True)
ip_address = models.CharField(_('IP'), max_length=15)
@classmethod
def create(cls, query_title, query_body, executed_by, ip_address):
log = cls(query_title=query_title,
query_body=query_body,
executed_by=executed_by,
executed_at=datetime.now(),
ip_address=ip_address)
return log
class Reconciliation(models.Model):
"""
Table to perform reconciliation on data.
"""
table_name = models.CharField(max_length=256, blank=True)
column_name = models.CharField(max_length=256, blank=True)
code_id = models.IntegerField()
url = models.CharField(max_length=512, blank=True)
class ClassRange(models.Model):
"""
Classi per l'applicazione del range
"""
class_from = models.IntegerField(null=True, blank=True)
class_to = models.IntegerField(null=True, blank=True)
class ManualRequest(models.Model):
"""
    Model used to let the user file a manual request that will be
    processed by a human operator.
"""
inquirer = models.ForeignKey("User", null=True)
dispatcher = models.CharField(max_length=30, blank=True)
request_date = models.DateTimeField(auto_now_add=True)
dispatch_date = models.DateTimeField(blank=True, null=True)
dispatch_note = models.CharField(max_length=512,
blank=True,
null=True)
subject = models.CharField(_('subject'), max_length=512, blank=False)
goal = models.CharField(_('goal'), max_length=256, blank=False)
topic = models.CharField(_('topic'), max_length=100, blank=False)
requested_data = models.CharField(_('requested data'),
max_length=2048,
blank=False)
references_years = models.CharField(_('referenced years'),
max_length=30,
blank=False)
territorial_level = models.CharField(_('territorial level'),
max_length=30,
blank=False)
other_territorial_level = models.CharField(
_('other territorial level (specify)'),
max_length=30,
blank=True)
specific_territorial_level = models.CharField(
_('specific territorial level'),
max_length=400,
blank=True)
url = models.CharField(_('url'),
max_length=256,
blank=False)
dispatched = models.BooleanField(default=False)
class UserType(models.Model):
"""
A metadata to enrich the user with an user type.
The user type contains some user types used for statistical purposes.
"""
name = models.CharField(_('user type'), max_length=128)
position = models.IntegerField()
def __unicode__(self):
"""
Get the name in unicode format.
:return: The name in unicode.
"""
return self.name
class UserManager(BaseUserManager):
"""
The User model has a custom manager that has
the following helper methods
(in addition to the methods provided by BaseUserManager).
"""
def _create_user(self, email, password, is_staff, is_superuser,
is_man_req_dispatcher, **extra_fields):
"""
Creates and saves a new user with the given email and password.
:param email: Email.
:param password: Password.
:param is_staff: Is the user a staff member?
        :param is_superuser: Is the user a superuser?
        :param is_man_req_dispatcher: Can the user dispatch manual requests?
        :param extra_fields: Additional fields passed to the model.
"""
now = timezone.now()
if not email:
raise ValueError(_('The given email must be set'))
email = self.normalize_email(email)
user = self.model(email=email,
is_staff=is_staff,
is_manual_request_dispatcher=is_man_req_dispatcher,
is_active=True,
is_superuser=is_superuser,
last_login=now,
date_joined=now,
date_change_password=now,
**extra_fields)
user.set_password(password)
user.save(using=self._db)
return user
def create_user(self, email, password=None, **extra_fields):
"""
Create an user without password.
:param email: Email.
:param password: password.
:param extra_fields: Unused
:return: The created User.
"""
return self._create_user(email, password, False, False, True,
**extra_fields)
def create_superuser(self, email, password, **extra_fields):
"""
Create a superuser.
:param email:
:param password:
:param extra_fields:
:return:The created User with super user privileges.
"""
        # _create_user also requires is_man_req_dispatcher; True is assumed
        # here, matching create_user above.
        return self._create_user(email, password, True, True, True,
                                 **extra_fields)
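# A minimal usage sketch (hypothetical addresses; assumes Django settings
# point AUTH_USER_MODEL at the User model below):
#     User.objects.create_user('jane@example.com', password='secret')
#     User.objects.create_superuser('root@example.com', 's3cret')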
class User(AbstractBaseUser, PermissionsMixin):
"""
A fully featured User model with admin-compliant permissions that uses
a full-length email field as the username.
Email and password are required. Other fields are optional.
"""
email = models.EmailField(_('email address'),
max_length=128,
unique=True,
blank=False)
first_name = models.CharField(_('first name'), max_length=32, blank=False)
last_name = models.CharField(_('last name'), max_length=32, blank=False)
phone_number = models.CharField(_('phone_number'),
max_length=15,
blank=True)
user_type = models.ForeignKey("UserType", verbose_name=_('User type'),
null=True)
    is_staff_hlp = _('Designates whether the user can log '
                     'into this admin site.')
is_staff = models.BooleanField(_('staff status'),
default=False,
help_text=is_staff_hlp)
    is_manual_request_dispatcher_hlp = _(
        'Designates whether the user can receive '
        'manual request notifications.')
is_manual_request_dispatcher = models.BooleanField(
_('manual request dispatcher status'),
default=False,
help_text=is_manual_request_dispatcher_hlp)
is_active_hlp = _('Designates whether this user should be treated as '
'active. Unselect this instead of deleting accounts.')
is_active = models.BooleanField(_('active'),
default=True,
help_text=is_active_hlp)
    date_joined = models.DateTimeField(_('date joined'),
                                       default=timezone.now)
    date_change_password = models.DateTimeField(_('date change password'),
                                                default=timezone.now)
objects = UserManager()
USERNAME_FIELD = 'email'
REQUIRED_FIELDS = ['first_name', 'last_name']
class Meta:
verbose_name = _('user')
verbose_name_plural = _('users')
def get_absolute_url(self):
"""
Get the url with absolute path.
:return: The absolute url.
"""
return "/users/%s/" % urlquote(self.email)
def get_full_name(self):
"""
Returns the first_name plus the last_name, with a space in between.
:return: The full user name.
"""
full_name = '%s %s' % (self.first_name, self.last_name)
return full_name.strip()
def get_short_name(self):
"""
Returns the short name for the user.
:return: The short user name.
"""
return self.first_name
def email_user(self, subject, message, from_email=None):
"""
Sends an email to this User.
:param subject: Email subject.
:param message: Email text message.
:param from_email: Sender email address.
"""
send_mail(subject, message, from_email, [self.email])
    def delete(self, *args, **kwargs):
        send_mail(unicode(_('Unused ISPAT LOD4STAT application account')),
                  unicode(_("Your account (username %s) for the ISPAT LOD4STAT data dissemination web application (http://www.l4s.ispat.provincia.tn.it/) has not been used for more than two years. Since interest in it appears to have lapsed, the account has been cancelled. If you would like to use the application in the future, please register again.")) % self.email,
                  settings.DEFAULT_FROM_EMAIL,
                  [self.email])
super(User,self).delete()
__original_password = None
def __init__(self, *args, **kwargs):
super(User, self).__init__(*args, **kwargs)
self.__original_password = self.password
def save(self, force_insert=False, force_update=False, *args, **kwargs):
if self.password != self.__original_password:
self.date_change_password = timezone.now()
self.__original_password = self.password
super(User, self).save(force_insert, force_update, *args, **kwargs)
def get_date_change_password(self):
"""
Returns the date change password
:return: The date change password.
"""
return self.date_change_password
class Synonym(models.Model):
"""
Tuple che rappresentano sinonimi separati da ;
"""
synonyms_list = models.CharField(max_length=5000)
|
trentino-sistemi/l4s
|
web/models.py
|
Python
|
agpl-3.0
| 14,065
|
__author__ = 'ariel'
"""
Python Population Simulator
Copyright (C) 2015 Ariel Young
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""
from Genetics import Expressions, Genotypes
from Crypto.Random import random
class TraitAlleles(object):
    traitPhenotypes = {"furColor" : {"dominant" : "black", "recessive" : "brown"},
                       "furLength" : {"dominant" : "long", "recessive" : "short"},
                       "isTall" : {"dominant" : "tall", "recessive" : "short"}}
traitIsComplete = False
expression = None
trait = None
genotype = None
phenotype = None
    letterOne = None
    letterTwo = None
choices = []
def __init__(self, trait, alleles=False, letter_one = None, letter_two = None):
self.trait = trait
if alleles:
if letter_one != None:
self.letterOne = letter_one
if letter_two != None:
self.letterTwo = letter_two
            self.__updateExpression()
if trait == "furColor":
choices = list('Ff')
elif trait == "furLength":
choices = list('Ll')
elif trait == "isTall":
choices = list("Hh")
def getGenotype(self):
return self.genotype
def getExpression(self):
return self.expression
    def __updateExpression(self):
        # Shared helper: derive expression, genotype and phenotype from the
        # two allele letters (extracted from the previously duplicated logic).
        if not (self.letterOne and self.letterTwo):
            return
        if self.letterOne.isupper() and self.letterTwo.isupper():
            self.expression = Expressions.HOMOZYGOUS_DOMINANT
            self.genotype = Genotypes.DOMINANT
        elif self.letterOne.isupper() or self.letterTwo.isupper():
            self.expression = Expressions.HETEROZYGOUS_DOMINANT
            self.genotype = Genotypes.DOMINANT
        else:
            self.expression = Expressions.HOMOZYGOUS_RECESSIVE
            self.genotype = Genotypes.RECESSIVE
        self.__determinePhenotype()
    def setLetterOne(self, letter):
        self.letterOne = letter
        self.__updateExpression()
    def setLetterTwo(self, letter):
        self.letterTwo = letter
        self.__updateExpression()
def getRandomAllele(self):
rand = random.randint(0, 1)
if rand:
return self.letterOne
else:
return self.letterTwo
    def __determinePhenotype(self):
        if self.genotype == Genotypes.DOMINANT:
            self.phenotype = self.traitPhenotypes[self.trait]["dominant"]
        else:
            self.phenotype = self.traitPhenotypes[self.trait]["recessive"]
        self.choices = [self.letterOne, self.letterTwo]
    def populateWithRandom(self):
        self.letterOne = random.choice(self.choices)
        self.letterTwo = random.choice(self.choices)
        self.__updateExpression()
def getAlleles(self):
if self.letterOne and self.letterTwo:
return (self.letterOne, self.letterTwo)
elif self.letterOne and not self.letterTwo:
return self.letterOne
elif self.letterTwo and not self.letterOne:
return self.letterTwo
def getAllelesAsList(self):
return [self.letterOne, self.letterTwo]
def getPhenotype(self):
return self.phenotype
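# A minimal usage sketch (hypothetical allele letters; assumes the Expressions
# and Genotypes enums imported above behave as their names suggest):
if __name__ == "__main__":
    demo = TraitAlleles("furColor", alleles=True, letter_one="F", letter_two="f")
    print(demo.getExpression())  # Expressions.HETEROZYGOUS_DOMINANT
    print(demo.getPhenotype())   # "black", the dominant furColor phenotype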
|
DarthGeek01/PopulationSimulator
|
Genetics/Allele.py
|
Python
|
gpl-2.0
| 6,542
|
from marshmallow import ValidationError
class InputValidationError(ValidationError):
"""Raised when loading fails on an input-specialized schema"""
class InputLoaderSchemaMixin(object):
def load_strict(self, data):
loaded = self.load(data)
if loaded.errors:
raise InputValidationError(loaded.errors)
return loaded
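# A minimal usage sketch (hypothetical schema; assumes marshmallow 2.x, where
# load() returns a result object exposing .errors as used above):
#     from marshmallow import Schema, fields
#     class UserInputSchema(InputLoaderSchemaMixin, Schema):
#         name = fields.Str(required=True)
#     UserInputSchema().load_strict({})  # raises InputValidationError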
|
ccortezia/featuring
|
featuring-flask-api/featuring/utilities/ext_schema.py
|
Python
|
mit
| 362
|
from setuptools import setup, find_packages
setup(
name = "pycloser",
version = "0.2",
packages = find_packages(),
author = "Arvin Kulagin",
author_email = "arvinkulagin@yandex.ru",
    description = "Clean exit for Python scripts after Ctrl-C.",
long_description = open("README.rst").read(),
license = "MIT",
keywords = "signals exit",
url = "https://github.com/arvinkulagin/pycloser"
)
|
arvinkulagin/pycloser
|
setup.py
|
Python
|
mit
| 395
|
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PySpacyModelsEnCoreWebSm(PythonPackage):
"""English multi-task CNN trained on OntoNotes. Assigns context-specific
token vectors, POS tags, dependency parse and named entities."""
homepage = "https://spacy.io/models/en#en_core_web_sm"
url = "https://github.com/explosion/spacy-models/releases/download/en_core_web_sm-2.3.1/en_core_web_sm-2.3.1.tar.gz"
version('2.3.1', sha256='06c80936324012d1223291d2af41a5229e746dc2dee8fe31a532666ee3d18aaa')
version('2.2.5', sha256='60b69065c97fd2e4972c33300205e1dead3501d2e0bfd6a182c3a033e337caee')
depends_on('py-setuptools', type='build')
depends_on('py-spacy@2.2.2:', type=('build', 'run'), when='@:2.2.5')
depends_on('py-spacy@2.3.0:2.3', type=('build', 'run'), when='@2.3.1:')
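# Typical usage from the Spack CLI (illustrative; assumes this builtin repo
# is available):
#     spack install py-spacy-models-en-core-web-sm@2.3.1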
|
LLNL/spack
|
var/spack/repos/builtin/packages/py-spacy-models-en-core-web-sm/package.py
|
Python
|
lgpl-2.1
| 967
|
"""NDG XACML parsers package
NERC DataGrid
"""
__author__ = "P J Kershaw"
__date__ = "15/03/10"
__copyright__ = "(C) 2010 Science and Technology Facilities Council"
__contact__ = "Philip.Kershaw@stfc.ac.uk"
__license__ = "BSD - see LICENSE file in top-level directory"
__revision__ = "$Id$"
import logging
log = logging.getLogger(__name__)
from abc import ABCMeta, abstractmethod
from ndg.xacml import XacmlError
from ndg.xacml.core import XacmlCoreBase
class XMLParseError(XacmlError):
"""XACML package XML Parsing error"""
class AbstractReader(object):
"""Abstract base class for XACML reader"""
__metaclass__ = ABCMeta
@classmethod
def __subclasshook__(cls, C):
"""Derived class must implement __call__"""
if cls is AbstractReader:
if any("__call__" in B.__dict__ for B in C.__mro__):
return True
return NotImplemented
@abstractmethod
def __call__(self, obj, common):
"""Abstract Parse XACML method
@raise NotImplementedError:
"""
raise NotImplementedError()
@classmethod
def parse(cls, obj, common):
"""Parse from input object and return new XACML object
@param obj: input source - file name, stream object or other
@type obj: string, stream or other
@return: new XACML object
@rtype: XacmlCoreBase sub type
"""
reader = cls()
return reader(obj, common)
class AbstractReaderFactory(object):
"""Abstract base class XACML reader factory"""
__metaclass__ = ABCMeta
@classmethod
@abstractmethod
def getReader(cls, xacmlType):
"""Get the reader class for the given XACML input type
@param xacmlType: XACML type to retrieve a reader for
@type xacmlType: ndg.xaml.core.XacmlCoreBase derived
@return: reader class
@rtype: ndg.xacml.parsers.AbstractReader derived type
"""
if not issubclass(xacmlType, XacmlCoreBase):
raise TypeError('Expecting %r derived class for getReader method; '
'got %r' % (XacmlCoreBase, xacmlType))
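# A minimal usage sketch (hypothetical concrete reader; a subclass must
# implement __call__ to satisfy the ABC above):
#     class PolicyReader(AbstractReader):
#         def __call__(self, obj, common):
#             ...  # parse obj and return a XacmlCoreBase subtype
#     policy = PolicyReader.parse(open('policy.xml'), common=None)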
|
cedadev/ndg_xacml
|
ndg/xacml/parsers/__init__.py
|
Python
|
bsd-3-clause
| 2,221
|
##########################################################################
#
# Copyright (c) 2008-2014, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of Image Engine Design nor the names of any
# other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import os.path
import maya.cmds
import maya.OpenMaya as OpenMaya
import IECore
import IECoreMaya
class FromMayaMeshConverterTest( IECoreMaya.TestCase ) :
def testFactory( self ) :
sphere = maya.cmds.polySphere( subdivisionsX=10, subdivisionsY=5, constructionHistory=False )
sphere = maya.cmds.listRelatives( sphere, shapes=True )[0]
converter = IECoreMaya.FromMayaShapeConverter.create( sphere )
self.assert_( converter.isInstanceOf( IECore.TypeId( IECoreMaya.TypeId.FromMayaMeshConverter ) ) )
converter = IECoreMaya.FromMayaShapeConverter.create( sphere, IECore.TypeId.MeshPrimitive )
self.assert_( converter.isInstanceOf( IECore.TypeId( IECoreMaya.TypeId.FromMayaMeshConverter ) ) )
converter = IECoreMaya.FromMayaShapeConverter.create( sphere, IECore.TypeId.Primitive )
self.assert_( converter.isInstanceOf( IECore.TypeId( IECoreMaya.TypeId.FromMayaMeshConverter ) ) )
converter = IECoreMaya.FromMayaObjectConverter.create( sphere )
self.assert_( converter.isInstanceOf( IECore.TypeId( IECoreMaya.TypeId.FromMayaMeshConverter ) ) )
converter = IECoreMaya.FromMayaObjectConverter.create( sphere, IECore.TypeId.MeshPrimitive )
self.assert_( converter.isInstanceOf( IECore.TypeId( IECoreMaya.TypeId.FromMayaMeshConverter ) ) )
converter = IECoreMaya.FromMayaObjectConverter.create( sphere, IECore.TypeId.Primitive )
self.assert_( converter.isInstanceOf( IECore.TypeId( IECoreMaya.TypeId.FromMayaMeshConverter ) ) )
def testConstructor( self ) :
sphere = maya.cmds.polySphere( subdivisionsX=10, subdivisionsY=5, constructionHistory=False )
sphere = maya.cmds.listRelatives( sphere, shapes=True )[0]
converter = IECoreMaya.FromMayaMeshConverter( sphere )
m = converter.convert()
self.failUnless( isinstance( m, IECore.MeshPrimitive ) )
def testParameters( self ) :
sphere = maya.cmds.polySphere( subdivisionsX=10, subdivisionsY=5, constructionHistory=False )
sphere = maya.cmds.listRelatives( sphere, shapes=True )[0]
converter = IECoreMaya.FromMayaShapeConverter.create( sphere )
self.assertEqual( converter["interpolation"].getTypedValue(), "default" )
p = converter.convert()
self.assertEqual( p.interpolation, "linear" )
self.assertTrue( "N" in p )
converter["interpolation"].setTypedValue( "linear" )
p = converter.convert()
self.assertEqual( p.interpolation, "linear" )
converter["interpolation"].setTypedValue( "catmullClark" )
p = converter.convert()
self.assertFalse( "N" in p )
self.assertEqual( p.interpolation, "catmullClark" )
converter = IECoreMaya.FromMayaShapeConverter.create( sphere )
self.assertEqual( converter["points"].getTypedValue(), True )
m = converter.convert()
self.assert_( "P" in m )
self.assertEqual( m["P"].data.getInterpretation(), IECore.GeometricData.Interpretation.Point )
converter["points"].setTypedValue( False )
self.assert_( not "P" in converter.convert() )
converter = IECoreMaya.FromMayaShapeConverter.create( sphere )
self.assertEqual( converter["normals"].getTypedValue(), True )
m = converter.convert()
self.assert_( "N" in m )
self.assertEqual( m["N"].data.getInterpretation(), IECore.GeometricData.Interpretation.Normal )
converter["normals"].setTypedValue( False )
self.assert_( not "N" in converter.convert() )
converter = IECoreMaya.FromMayaShapeConverter.create( sphere )
self.assertEqual( converter["st"].getTypedValue(), True )
self.assert_( "s" in converter.convert() )
self.assert_( "t" in converter.convert() )
converter["st"].setTypedValue( False )
self.assert_( not "s" in converter.convert() )
self.assert_( not "t" in converter.convert() )
def testInterpolationType( self ) :
sphere = maya.cmds.polySphere( subdivisionsX=10, subdivisionsY=5, constructionHistory=False )
sphere = maya.cmds.listRelatives( sphere, shapes=True )[0]
# first time creates the plug
IECoreMaya.ToMayaMeshConverter.setMeshInterpolationAttribute( sphere, "catmullClark" )
mesh = IECoreMaya.FromMayaShapeConverter.create( sphere ).convert()
self.assertEqual( mesh.interpolation, "catmullClark" )
# second time, just update the plug
IECoreMaya.ToMayaMeshConverter.setMeshInterpolationAttribute( sphere, "linear" )
mesh = IECoreMaya.FromMayaShapeConverter.create( sphere ).convert()
self.assertEqual( mesh.interpolation, "linear" )
# accepts the labels for the presets "subdiv" -> "catmullClark"
IECoreMaya.ToMayaMeshConverter.setMeshInterpolationAttribute( sphere, "subdiv" )
mesh = IECoreMaya.FromMayaShapeConverter.create( sphere ).convert()
self.assertEqual( mesh.interpolation, "catmullClark" )
# accepts the labels for the presets "poly" -> "linear"
IECoreMaya.ToMayaMeshConverter.setMeshInterpolationAttribute( sphere, "poly" )
mesh = IECoreMaya.FromMayaShapeConverter.create( sphere ).convert()
self.assertEqual( mesh.interpolation, "linear" )
def testSphere( self ) :
sphere = maya.cmds.polySphere( subdivisionsX=10, subdivisionsY=5, constructionHistory=False )
sphere = maya.cmds.listRelatives( sphere, shapes=True )[0]
converter = IECoreMaya.FromMayaShapeConverter.create( sphere )
m = converter.convert()
# check topology
self.assertEqual( m.verticesPerFace.size(), 50 )
self.assertEqual( m.variableSize( IECore.PrimitiveVariable.Interpolation.Uniform ), 50 )
self.assertEqual( m.variableSize( IECore.PrimitiveVariable.Interpolation.Vertex ), 42 )
self.assertEqual( m["P"].data.size(), 42 )
self.assertEqual( m["N"].data.size(), 180 )
self.assertEqual( m["s"].data.size(), 180 )
self.assertEqual( m["t"].data.size(), 180 )
self.assert_( m["P"].data == converter.points() )
self.assertEqual( m["P"].data.getInterpretation(), IECore.GeometricData.Interpretation.Point )
self.assertEqual( m["N"].data.getInterpretation(), IECore.GeometricData.Interpretation.Normal )
self.assert_( m["N"].data == converter.normals() )
self.assert_( m["s"].data == converter.s( "map1" ) )
self.assert_( m["t"].data == converter.t( "map1" ) )
self.assert_( IECore.Box3f( IECore.V3f( -1.0001 ), IECore.V3f( 1.0001 ) ).contains( m.bound() ) )
self.assert_( m.bound().contains( IECore.Box3f( IECore.V3f( -0.90 ), IECore.V3f( 0.90 ) ) ) )
def testSpaces( self ) :
sphere = maya.cmds.polySphere( subdivisionsX=10, subdivisionsY=5, constructionHistory=False )
maya.cmds.move( 1, 2, 3, sphere )
sphere = maya.cmds.listRelatives( sphere, shapes=True )[0]
converter = IECoreMaya.FromMayaShapeConverter.create( sphere )
self.assertEqual( converter["space"].getNumericValue(), IECoreMaya.FromMayaCurveConverter.Space.Object )
m = converter.convert()
self.assert_( IECore.Box3f( IECore.V3f( -1.0001 ), IECore.V3f( 1.0001 ) ).contains( m.bound() ) )
converter["space"].setNumericValue( IECoreMaya.FromMayaShapeConverter.Space.World )
m = converter.convert()
self.assert_( IECore.Box3f( IECore.V3f( -1.0001 ) + IECore.V3f( 1, 2, 3 ), IECore.V3f( 1.0001 ) + IECore.V3f( 1, 2, 3 ) ).contains( m.bound() ) )
def testNormalsOnlyWhenLinear( self ) :
# adding normals to a mesh which will be rendered subdivided is a very bad thing to do.
# make sure we aren't doing it.
sphere = maya.cmds.polySphere( subdivisionsX=10, subdivisionsY=5, constructionHistory=False )
sphere = maya.cmds.listRelatives( sphere, shapes=True )[0]
converter = IECoreMaya.FromMayaShapeConverter.create( sphere )
m = converter.convert()
self.assert_( "N" in m )
self.assertEqual( m["N"].data.getInterpretation(), IECore.GeometricData.Interpretation.Normal )
converter["interpolation"].setTypedValue( "catmullClark" )
m = converter.convert()
self.assert_( not "N" in m )
def testWindingOrder( self ) :
plane = maya.cmds.polyPlane( ch=False, subdivisionsX=1, subdivisionsY=1 )
plane = maya.cmds.listRelatives( plane, shapes=True )[0]
converter = IECoreMaya.FromMayaShapeConverter.create( plane )
m = converter.convert()
p = m["P"].data
vertexIds = m.vertexIds
self.assertEqual( vertexIds.size(), 4 )
loop = IECore.V3fVectorData( [ p[vertexIds[0]], p[vertexIds[1]], p[vertexIds[2]], p[vertexIds[3]] ] )
self.assert_( IECore.polygonNormal( loop ).equalWithAbsError( IECore.V3f( 0, 1, 0 ), 0.0001 ) )
def testBlindData( self ) :
plane = maya.cmds.polyPlane( ch=False, subdivisionsX=1, subdivisionsY=1 )
plane = maya.cmds.listRelatives( plane, shapes=True )[0]
maya.cmds.addAttr( plane, dataType="string", longName="ieString" )
maya.cmds.setAttr( plane + ".ieString", "banana", type="string" )
converter = IECoreMaya.FromMayaShapeConverter.create( plane )
converter['blindDataAttrPrefix'] = IECore.StringData("ie")
m = converter.convert()
self.assertEqual( len( m.blindData().keys() ), 2 )
self.assertEqual( m.blindData()["name"], IECore.StringData( "pPlaneShape1" ) )
self.assertEqual( m.blindData()["ieString"], IECore.StringData( "banana" ) )
def testPrimVars( self ) :
plane = maya.cmds.polyPlane( ch=False, subdivisionsX=1, subdivisionsY=1 )
plane = maya.cmds.listRelatives( plane, shapes=True )[0]
maya.cmds.addAttr( plane, attributeType="float", longName="delightDouble", defaultValue=1 )
maya.cmds.addAttr( plane, dataType="doubleArray", longName="delightDoubleArray" )
maya.cmds.setAttr( plane + ".delightDoubleArray", ( 10, 11, 12, 13 ), type="doubleArray" )
converter = IECoreMaya.FromMayaShapeConverter.create( plane, IECore.MeshPrimitive.staticTypeId() )
m = converter.convert()
self.assertEqual( len( m.keys() ), 7 )
self.assertEqual( m["Double"].interpolation, IECore.PrimitiveVariable.Interpolation.Constant )
self.assertEqual( m["Double"].data, IECore.FloatData( 1 ) )
self.assertEqual( m["DoubleArray"].interpolation, IECore.PrimitiveVariable.Interpolation.Vertex )
self.assertEqual( m["DoubleArray"].data, IECore.FloatVectorData( [ 10, 11, 12, 13 ] ) )
def testConvertFromPlug( self ) :
sphere = maya.cmds.polySphere( subdivisionsX=10, subdivisionsY=5, constructionHistory=False )
maya.cmds.move( 1, 2, 3, sphere )
sphere = maya.cmds.listRelatives( sphere, shapes=True )[0]
converter = IECoreMaya.FromMayaPlugConverter.create( sphere + ".worldMesh" )
converter["space"].setNumericValue( IECoreMaya.FromMayaShapeConverter.Space.World )
m = converter.convert()
self.assertEqual( m["P"].data.getInterpretation(), IECore.GeometricData.Interpretation.Point )
self.assertEqual( m["N"].data.getInterpretation(), IECore.GeometricData.Interpretation.Normal )
self.assert_( IECore.Box3f( IECore.V3f( -1.0001 ) + IECore.V3f( 1, 2, 3 ), IECore.V3f( 1.0001 ) + IECore.V3f( 1, 2, 3 ) ).contains( m.bound() ) )
def testSharedSTIndices( self ) :
maya.cmds.file( os.path.dirname( __file__ ) + "/scenes/twoTrianglesWithSharedUVs.ma", force = True, open = True )
mesh = IECoreMaya.FromMayaShapeConverter.create( "pPlaneShape1" ).convert()
self.failUnless( "stIndices" in mesh )
self.assertEqual( mesh["stIndices"].interpolation, IECore.PrimitiveVariable.Interpolation.FaceVarying )
self.assertEqual( mesh["stIndices"].data, IECore.IntVectorData( [ 0, 1, 2, 2, 1, 3 ] ) )
def testSplitSTIndices( self ) :
maya.cmds.file( os.path.dirname( __file__ ) + "/scenes/twoTrianglesWithSplitUVs.ma", force = True, open = True )
mesh = IECoreMaya.FromMayaShapeConverter.create( "pPlaneShape1" ).convert()
self.failUnless( "stIndices" in mesh )
self.assertEqual( mesh["stIndices"].interpolation, IECore.PrimitiveVariable.Interpolation.FaceVarying )
self.assertEqual( mesh["stIndices"].data, IECore.IntVectorData( [ 0, 1, 5, 2, 4, 3 ] ) )
def testExtraSTs( self ) :
plane = maya.cmds.polyPlane( ch=False, subdivisionsX=1, subdivisionsY=1 )
plane = maya.cmds.listRelatives( plane, shapes=True )[0]
converter = IECoreMaya.FromMayaShapeConverter.create( plane, IECore.MeshPrimitive.staticTypeId() )
m = converter.convert()
self.assert_( "s" in m )
self.assert_( "t" in m )
self.assert_( "stIndices" in m )
self.assert_( "map1_s" not in m )
self.assert_( "map1_t" not in m )
self.assert_( "map1Indices" not in m )
maya.cmds.polyUVSet( plane, copy=True, uvSet="map1", newUVSet="map2" )
m = converter.convert()
self.assert_( "s" in m )
self.assert_( "t" in m )
self.assert_( "stIndices" in m )
self.assert_( "map1_s" not in m )
self.assert_( "map1_t" not in m )
self.assert_( "map1Indices" not in m )
self.assert_( "map2_s" in m )
self.assert_( "map2_t" in m )
self.assert_( "map2Indices" in m )
def testManyUVConversionsFromPlug( self ) :
coreMesh = IECore.Reader.create( "test/IECore/data/cobFiles/pSphereShape1.cob" ).read()
self.assertTrue( "s" in coreMesh )
self.assertTrue( "t" in coreMesh )
for i in range( 0, 7 ) :
coreMesh[ "testUVSet%d_s" % i ] = IECore.PrimitiveVariable( coreMesh["s"].interpolation, coreMesh["s"].data.copy() )
coreMesh[ "testUVSet%d_t" % i ] = IECore.PrimitiveVariable( coreMesh["t"].interpolation, coreMesh["t"].data.copy() )
fn = IECoreMaya.FnOpHolder.create( "test", "meshMerge" )
mayaMesh = maya.cmds.ls( maya.cmds.polyPlane(), dag=True, type="mesh" )[0]
maya.cmds.connectAttr( fn.name()+".result", mayaMesh+".inMesh", force=True )
op = fn.getOp()
with fn.parameterModificationContext() :
op["input"].setValue( coreMesh )
maya.cmds.file( rename="/tmp/test.ma" )
maya.cmds.file( save=True )
maya.cmds.file( new=True, f=True )
maya.cmds.file( "/tmp/test.ma", open=True )
result = IECoreMaya.FromMayaMeshConverter( mayaMesh ).convert()
self.assertTrue( result.arePrimitiveVariablesValid() )
self.assertEqual( result.variableSize( IECore.PrimitiveVariable.Interpolation.Uniform ), 760 )
self.assertEqual( result.variableSize( IECore.PrimitiveVariable.Interpolation.FaceVarying ), 2280 )
self.assertEqual( coreMesh["s"], result["s"] )
self.assertEqual( coreMesh["t"], result["t"] )
for i in range( 0, 7 ) :
self.assertEqual( coreMesh[ "testUVSet%d_s" % i ], result[ "testUVSet%d_s" % i ] )
self.assertEqual( coreMesh[ "testUVSet%d_t" % i ], result[ "testUVSet%d_t" % i ] )
def testColors( self ):
# test alpha to rgb conversion
mesh = "pPlaneShape1"
maya.cmds.file( os.path.dirname( __file__ ) + "/scenes/colouredPlane.ma", force = True, open = True )
sel = OpenMaya.MSelectionList()
sel.add( mesh )
planeObj = OpenMaya.MObject()
sel.getDependNode( 0, planeObj )
fnMesh = OpenMaya.MFnMesh( planeObj )
fnMesh.setCurrentColorSetName( "cAlpha" )
converter = IECoreMaya.FromMayaShapeConverter.create( mesh, IECore.MeshPrimitive.staticTypeId() )
converter['colors'] = True
m = converter.convert()
self.assertEqual( m['Cs'].data, IECore.Color3fVectorData( [ IECore.Color3f(0), IECore.Color3f(1), IECore.Color3f(0.8), IECore.Color3f(0.5) ] ) )
self.assertEqual( converter.colors("cAlpha",True), m['Cs'].data )
# test rgba to rgb conversion
maya.cmds.file( os.path.dirname( __file__ ) + "/scenes/colouredPlane.ma", force = True, open = True )
sel = OpenMaya.MSelectionList()
sel.add( mesh )
planeObj = OpenMaya.MObject()
sel.getDependNode( 0, planeObj )
fnMesh = OpenMaya.MFnMesh( planeObj )
fnMesh.setCurrentColorSetName( "cRGBA" )
converter = IECoreMaya.FromMayaShapeConverter.create( mesh, IECore.MeshPrimitive.staticTypeId() )
converter['colors'] = True
m = converter.convert()
self.assertEqual( m['Cs'].data, IECore.Color3fVectorData( [ IECore.Color3f( 1, 1, 0 ), IECore.Color3f( 1, 1, 1 ), IECore.Color3f( 0, 1, 1 ), IECore.Color3f( 0, 1, 0 ) ] ) )
self.assertEqual( converter.colors("cRGBA",True), m['Cs'].data )
def testExtraColors( self ):
maya.cmds.file( os.path.dirname( __file__ ) + "/scenes/colouredPlane.ma", force = True, open = True )
mesh = "pPlaneShape1"
converter = IECoreMaya.FromMayaShapeConverter.create( mesh, IECore.MeshPrimitive.staticTypeId() )
converter['extraColors'] = True
m = converter.convert()
self.assertEqual( m['cAlpha_Cs'].data, IECore.FloatVectorData( [ 0, 1, 0.8, 0.5 ] ) )
self.assertEqual( converter.colors("cAlpha"), m['cAlpha_Cs'].data )
self.assertEqual( m['cRGB_Cs'].data, IECore.Color3fVectorData( [ IECore.Color3f(1,0,0), IECore.Color3f(0), IECore.Color3f(0,0,1), IECore.Color3f(0,1,0) ] ) )
self.assertEqual( converter.colors("cRGB"), m['cRGB_Cs'].data )
self.assertEqual( m['cRGBA_Cs'].data, IECore.Color4fVectorData( [ IECore.Color4f( 1, 1, 0, 0.5 ), IECore.Color4f( 1, 1, 1, 1 ), IECore.Color4f( 0, 1, 1, 1 ), IECore.Color4f( 0, 1, 0, 0.5 ) ] ) )
self.assertEqual( converter.colors("cRGBA"), m['cRGBA_Cs'].data )
if __name__ == "__main__":
IECoreMaya.TestProgram( plugins = [ "ieCore" ] )
|
hradec/cortex
|
test/IECoreMaya/FromMayaMeshConverterTest.py
|
Python
|
bsd-3-clause
| 18,381
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('naf_autoticket', '0023_hostdevice_hostlocation'),
]
operations = [
migrations.CreateModel(
name='AlertCorrelationWeight',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('AlertCompare', models.CharField(max_length=100)),
('TimeWeight', models.CharField(max_length=50)),
('LocationWeight', models.CharField(max_length=255, null=True)),
('LogicalWeight', models.CharField(max_length=255, null=True)),
('AlertInfo', models.ForeignKey(to='naf_autoticket.AlertInfo')),
],
options={
'db_table': 'nafautoticket_alertcorrelationweight',
'verbose_name_plural': 'alertcorrelationweight',
},
),
]
|
kevinnguyeneng/django-uwsgi-nginx
|
app/naf_autoticket/migrations/0024_alertcorrelationweight.py
|
Python
|
gpl-3.0
| 1,043
|
import datetime
import os
import xml.etree.cElementTree as ET
import sys
from zipfile import ZipFile
# see if this filename is in the array of filenames
def checkPresent(arr, fileName, curResult):
# don't bother testing if we've already failed
if curResult is False:
return False
if fileName not in arr:
print (fileName + " is missing")
return False
else:
return True
def checkFolder():
# do some checks on this folder
arr = os.listdir()
# initialise success flag
result = True
result = checkPresent(arr, "compositeContent.xml", result)
result = checkPresent(arr, "compositeArtifacts.xml", result)
result = checkPresent(arr, "p2.index", result)
if result is True:
zipFound = findZipFile()
if zipFound is None:
print("Zip-file not found in folder")
result = False
return result
def getDTG():
now = datetime.datetime.now()
return now.strftime("%Y_%m_%d__%H_%M_%S")
# see if there is a zip-file in the current folder
def findZipFile():
arr = os.listdir()
for file in arr:
if(file.endswith(".zip")):
return file
# update the children element
def updateChildren(contentFile, newFile):
tree = ET.parse(contentFile)
# check root
root = tree.getroot()
root_tag = root.tag
if root_tag != "repository":
print("# Invalid root tag in " + contentFile + " Quitting")
sys.exit()
children = root.find("./children")
# increment counter
curSize = children.attrib["size"]
children.attrib["size"] = str(int(curSize) + 1)
# insert new child element
newChild = ET.SubElement(children, "child")
newChild.tail = '\n'
newChild.set("location", "updates/" + newFile)
# write to file
tree.write(contentFile, short_empty_elements=True)
# prepend processing instructions
with open(contentFile, 'r') as file:
str1 = "<?xml version='1.0' encoding='UTF-8'?>\n"
str2 = "<?compositeMetadataRepository version='1.0.0'?>\n"
data = file.read()
data = str2 + data
data = str1 + data
with open(contentFile, 'w') as file:
file.write(data)
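# Illustrative effect of updateChildren (attributes trimmed for brevity):
# before: <repository><children size='1'><child location='updates/a'/></children></repository>
# after:  <repository><children size='2'><child location='updates/a'/><child location='updates/<dtg>'/></children></repository>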
def unpackZip(filename, dtg, folder):
# Create a ZipFile Object and load sample.zip in it
with ZipFile(filename, 'r') as zipObj:
# Extract all the contents of zip file in current directory
zipObj.extractall(folder + "/" + dtg)
# check things are ok
allOk = checkFolder()
if allOk:
print( "Valid folder, doing update")
    # retrieve the zip file; we've already established that it's present
    zipFile = findZipFile()
dtg = getDTG()
newName = dtg + ".zip"
print("Renaming zip file with DTG")
# rename file
os.rename(zipFile, newName)
print("Updating XML metadata")
# parse category files
updateChildren("compositeContent.xml", dtg)
updateChildren("compositeArtifacts.xml", dtg)
print("Unpacking repository")
# unpack the zip into the updates folder
unpackZip(newName, dtg, "updates")
print("Deleting .zip file")
# lastly, delete the zip file
os.remove(newName)
print("== COMPLETE ==")
|
debrief/debrief
|
org.mwc.cmap.combined.feature/root_installs/sample_data/other_formats/repository/update_Python.py
|
Python
|
epl-1.0
| 3,203
|
from keystone.manage2 import base
from keystone.manage2 import mixins
class Command(base.BaseBackendCommand, mixins.ListMixin):
"""Lists all users in the system."""
# pylint: disable=E1101
def get_users(self):
return self.user_manager.get_all()
def run(self, args):
"""Process argparse args, and print results to stdout"""
table = self.build_table(["ID", "Name", "Email", "Default Tenant ID",
"Enabled"])
for user in self.get_users():
row = [user.id, user.name, user.email, user.tenant_id,
user.enabled]
table.add_row(row)
# TODO(dolph): sort order and subsets could become CLI options
self.print_table(table)
|
HugoKuo/keystone-essex3
|
keystone/manage2/commands/list_users.py
|
Python
|
apache-2.0
| 736
|
#------------------------------------------------------------------------------
# Copyright 2013 Esri
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#------------------------------------------------------------------------------
# TestFastVisibilityByCircle_SingleInput.py
# Description: Test Publishable Tasks Toolbox > Fast Visibility By Circle with single input circle
# Requirements: ArcGIS Desktop Standard with Spatial Analyst Extension
# ----------------------------------------------------------------------------
import arcpy
import sys
import traceback
import TestUtilities
import os
class LicenseError(Exception):
pass
try:
if arcpy.CheckExtension("Spatial") == "Available":
arcpy.CheckOutExtension("Spatial")
else:
raise LicenseError
arcpy.ImportToolbox(TestUtilities.toolbox)
arcpy.env.overwriteOutput = True
inputCircle = os.path.join(TestUtilities.inputGDB, "FVBC_InputCircle")
inputHeightAboveSurface = 5.0
inputElevationURL = TestUtilities.inputElevationURL
psOutput = os.path.join(TestUtilities.outputGDB, "FVBCircle_1_vshed")
    #Testing Fast Visibility By Circle
    arcpy.AddMessage("Starting Test: Fast Visibility By Circle - Single input circle")
    arcpy.FastVisibilityByCircle_pubtask(inputCircle, inputHeightAboveSurface, inputElevationURL, psOutput)
#Verify Results
outputFeatureCount = int(arcpy.GetCount_management(psOutput).getOutput(0))
print("Output Viewshed: " + str(psOutput))
print("Output Observer Count: " + str(outputFeatureCount))
if (outputFeatureCount < 1):
print("Invalid Output Feature Count: " + str(outputFeatureCount))
raise Exception("Test Failed")
print("Test Passed")
except LicenseError:
print("Spatial Analyst license is unavailable" )
except arcpy.ExecuteError:
# Get the arcpy error messages
msgs = arcpy.GetMessages()
arcpy.AddError(msgs)
print(msgs)
# return a system error code
sys.exit(-1)
except:
# Get the traceback object
tb = sys.exc_info()[2]
tbinfo = traceback.format_tb(tb)[0]
# Concatenate information together concerning the error into a message string
pymsg = "PYTHON ERRORS:\nTraceback info:\n" + tbinfo + "\nError Info:\n" + str(sys.exc_info()[1])
msgs = "ArcPy ERRORS:\n" + arcpy.GetMessages() + "\n"
# Return python error messages for use in script tool or Python Window
arcpy.AddError(pymsg)
arcpy.AddError(msgs)
# Print Python error messages for use in Python / Python Window
print(pymsg + "\n")
print(msgs)
# return a system error code
sys.exit(-1)
finally:
if arcpy.CheckExtension("Spatial") == "Available":
arcpy.CheckInExtension("Spatial")
|
JudTown17/solutions-geoprocessing-toolbox
|
data_management/test/test_publishable_tasks/TestFastVisibilityByCircle_SingleInput.py
|
Python
|
apache-2.0
| 3,255
|
"""
Unit tests of Raster elements
"""
import numpy as np
from holoviews.element import Raster, Image
from holoviews.element.comparison import ComparisonTestCase
class TestRaster(ComparisonTestCase):
def setUp(self):
self.array1 = np.array([(0, 1, 2), (3, 4, 5)])
def test_raster_init(self):
Raster(self.array1)
def test_image_init(self):
image = Image(self.array1)
self.assertEqual(image.xdensity, 3)
self.assertEqual(image.ydensity, 2)
def test_raster_index(self):
raster = Raster(self.array1)
self.assertEqual(raster[0, 1], 3)
def test_image_index(self):
image = Image(self.array1)
self.assertEqual(image[-.33, -0.25], 3)
def test_raster_sample(self):
raster = Raster(self.array1)
self.assertEqual(raster.sample(y=0).data,
np.array([(0, 0), (1, 1), (2, 2)]))
def test_image_sample(self):
image = Image(self.array1)
self.assertEqual(image.sample(y=0.25).data,
np.array([(-0.333333, 0), (0, 1), (0.333333, 2)]))
|
mjabri/holoviews
|
tests/testraster.py
|
Python
|
bsd-3-clause
| 1,103
|
# -*- coding: utf-8 -*-
# This file is part of tornado-stale-client.
# https://github.com/globocom/tornado-stale-client
# Licensed under the MIT license:
# http://www.opensource.org/licenses/MIT-license
# Copyright (c) 2016, Globo.com <backstage@corp.globo.com>
from tornado_stale_client.client import StaleHTTPClient # NOQA
__version__ = '0.2.2'
|
globocom/tornado-stale-client
|
tornado_stale_client/__init__.py
|
Python
|
mit
| 352
|
from headers import *
class PathFinder:
def __init__(self, image_path):
self.image = cv2.imread(image_path)
def find_and_draw(self):
binary_image = self.filtered_binary_image(self.image)
contours, hierarchy = self.find_contours(binary_image)
graph = self.build_connection_graph(contours, hierarchy)
start_arrow_point = self.start_arrow_point(self.image)
start_arrow_contour = self.find_contour_by_point(
start_arrow_point, contours, hierarchy
)
path = self.build_path(graph, start_arrow_contour)
self.visualize_path(path, contours)
self.draw()
    def visualize_path(self, path, contours):
        for vertex in zip(path, range(1, len(path)+1)):
            contour_ind = vertex[0]
            show_ind = vertex[1]
            contour = contours[contour_ind]
# Start arrow
if show_ind == 1:
cv2.ellipse(
self.image, cv2.fitEllipse(contour), (0, 255, 200), 3
)
# Treasure
if show_ind == len(path):
cv2.ellipse(
self.image, cv2.fitEllipse(contour), (11, 11, 200), 3
)
(x, y) = self.fit_ellipse(contour)
# Path
font = cv2.FONT_HERSHEY_SIMPLEX
point = (int(x)+10, int(y))
color = (11, 184, 134)
cv2.putText(self.image, str(show_ind), point, font, 1, color, 2)
    def build_path(self, graph, start_ind):
        # Follow the single outgoing edge of each contour until a contour
        # with no further intersection is reached.
        path = [start_ind]
        next_inds = graph[start_ind]
        while next_inds:
            cur_ind = next_inds[0]
            path.append(cur_ind)
            next_inds = graph[cur_ind]
        return path
def draw(self):
rgb_image = cv2.cvtColor(self.image, cv2.COLOR_BGR2RGB)
plt.imshow(rgb_image)
plt.show()
def start_arrow_point(self, image):
_, _, r = cv2.split(image)
_, th251 = cv2.threshold(r, 251, 1, cv2.THRESH_BINARY)
_, thn255 = cv2.threshold(r, 254, 1, cv2.THRESH_BINARY_INV)
unfiltered = th251 & thn255
binary = ImageFilter().erode(
unfiltered, {"kernel": np.ones((7, 7), np.uint8), "iters": 1}
)
contours, _ = self.find_contours(binary)
if len(contours) != 1:
raise Exception("Start arrow should have only 1 contour")
(x, y) = self.fit_ellipse(contours[0])
return (int(x), int(y))
def find_contours(self, binary):
contours, hierarchy = cv2.findContours(
binary, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE
)
return contours, hierarchy[0]
def fit_ellipse(self, contour):
(x, y), (_, _), _ = cv2.fitEllipse(contour)
return (x, y)
def filtered_binary_image(self, image):
gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
_, blobs = cv2.threshold(gray_image, 3, 1, cv2.THRESH_BINARY)
_, start_circle = cv2.threshold(gray_image, 100, 1, cv2.THRESH_BINARY)
_, arrows_with_circles = cv2.threshold(
gray_image, 250, 1, cv2.THRESH_BINARY
)
binary = blobs & (cv2.bitwise_not(start_circle)) | arrows_with_circles
binary = ImageFilter().dilate(
binary, {"kernel": np.ones((7, 7), np.uint8), "iters": 1}
)
return binary
def build_connection_graph(self, contours, hierarchy):
intersections = {}
for component in zip(contours, hierarchy, range(0, len(contours))):
current_contour = component[0]
current_hierarchy = component[1]
ind = component[2]
is_parent = current_hierarchy[3] < 0
if is_parent:
child_contour = contours[current_hierarchy[2]]
(x1, y1) = self.fit_ellipse(current_contour)
(x2, y2) = self.fit_ellipse(child_contour)
angle = math.atan((y2-y1) / (x2-x1))
                # very_magic_const helps to scale the line in LinePoint#scale
                very_magic_const = 0.79
                is_up_direction = (x1+very_magic_const > x2)
line_point = LinePoint((x2, y2), angle, is_up_direction)
x1, y1 = int(x1), int(y1)
intersection_ind = self.intersection(
line_point, contours, hierarchy
)
intersections[ind] = intersection_ind
                if intersection_ind and (intersection_ind[0] == ind):
                    raise Exception("The same vertex in line intersection!")
return intersections
def intersection(self, line_point, contours, hierarchy):
result = []
for component in zip(contours, hierarchy, range(0, len(contours))):
current_contour = component[0]
current_hierarchy = component[1]
ind = component[2]
is_parent = current_hierarchy[3] < 0
if is_parent:
# Scale line from (x2, y2) to (x2+length,y2+length) with angle
# to find intersection between two contours
for scale_val in range(0, 20):
length = 62 + scale_val
scaled_point = self.scale_point(line_point, length)
if self.is_in_approx_poly(current_contour, scaled_point):
result.append(ind)
break
if len(result) > 1:
raise Exception("More than one intersection in arrows")
return result
def find_contour_by_point(self, point, contours, hierarchy):
for component in zip(contours, hierarchy, range(0, len(contours))):
current_contour = component[0]
current_hierarchy = component[1]
ind = component[2]
is_parent = current_hierarchy[3] < 0
if is_parent and self.is_in_approx_poly(current_contour, point):
return ind
def is_in_approx_poly(self, contour, point):
approx_contour = cv2.approxPolyDP(contour, 10, True)
is_in_poly = cv2.pointPolygonTest(approx_contour, point, False)
return (is_in_poly != -1)
def scale_point(self, line_point, length):
scaled = line_point.scale(length)
return scaled.point
class LinePoint:
def __init__(self, point, angle, is_up_direction):
self.point = point
self.angle = angle
self.is_up_direction = is_up_direction
def scale(self, scale_val):
(x, y) = self.point
if self.is_up_direction:
x = int(x - scale_val*math.cos(self.angle))
y = int(y - scale_val*math.sin(self.angle))
else:
x = int(x + scale_val*math.cos(self.angle))
y = int(y + scale_val*math.sin(self.angle))
return LinePoint((x, y), self.angle, self.is_up_direction)
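# A small worked example (hypothetical numbers): with angle 0.0 the point is
# scaled horizontally, so LinePoint((100, 100), 0.0, False).scale(10).point
# gives (110, 100), while is_up_direction=True gives (90, 100).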
# PathFinder('./data/Klad01.jpg').find_and_draw()
|
sld/computer_vision_workshop
|
Seminars/PathFinder/path_finder.py
|
Python
|
mit
| 7,036
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('barsystem', '0015_auto_20150505_2046'),
]
operations = [
migrations.AddField(
model_name='person',
name='allow_remote_access',
field=models.BooleanField(default=False),
),
migrations.AddField(
model_name='person',
name='balance_limit',
field=models.DecimalField(default=0, max_digits=5, decimal_places=2),
),
migrations.AddField(
model_name='person',
name='remote_passphrase',
field=models.CharField(max_length=50, default='', blank=True),
),
migrations.AlterField(
model_name='product',
name='quantity_type',
field=models.CharField(max_length=100, default='None', choices=[('None', 'None'), ('enter_numeric', 'Numeric input')], blank=True),
),
]
|
TkkrLab/barsystem
|
barsystem/src/barsystem/migrations/0016_auto_20150505_2342.py
|
Python
|
mit
| 1,047
|
"""Add minimal paper reviewing data to DB
Revision ID: 33eb26faf225
Revises: 1af04f7ede7a
Create Date: 2016-04-03 11:58:16.756125
"""
import sqlalchemy as sa
from alembic import op
from sqlalchemy.sql.ddl import CreateSchema, DropSchema
from indico.core.db.sqlalchemy import PyIntEnum, UTCDateTime
from indico.modules.events.paper_reviewing.models.roles import PaperReviewingRoleType
# revision identifiers, used by Alembic.
revision = '33eb26faf225'
down_revision = '3b0b69b541a2'
def upgrade():
op.execute(CreateSchema('event_paper_reviewing'))
op.create_table(
'contribution_roles',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('user_id', sa.Integer(), nullable=False, index=True),
sa.Column('contribution_id', sa.Integer(), nullable=False, index=True),
sa.Column('role', PyIntEnum(PaperReviewingRoleType), nullable=False, index=True),
sa.ForeignKeyConstraint(['contribution_id'], [u'events.contributions.id']),
sa.ForeignKeyConstraint(['user_id'], [u'users.users.id']),
sa.PrimaryKeyConstraint('id'),
schema='event_paper_reviewing'
)
op.create_table(
'paper_files',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('contribution_id', sa.Integer(), nullable=False, index=True),
sa.Column('revision_id', sa.Integer(), nullable=True),
sa.Column('storage_backend', sa.String(), nullable=False),
sa.Column('content_type', sa.String(), nullable=False),
sa.Column('size', sa.BigInteger(), nullable=False),
sa.Column('storage_file_id', sa.String(), nullable=False),
sa.Column('filename', sa.String(), nullable=False),
sa.Column('created_dt', UTCDateTime, nullable=False),
sa.ForeignKeyConstraint(['contribution_id'], [u'events.contributions.id']),
sa.PrimaryKeyConstraint('id'),
schema='event_paper_reviewing'
)
def downgrade():
op.drop_table('paper_files', schema='event_paper_reviewing')
op.drop_table('contribution_roles', schema='event_paper_reviewing')
op.execute(DropSchema('event_paper_reviewing'))
|
belokop/indico_bare
|
migrations/versions/201604081138_33eb26faf225_add_minimal_paper_reviewing_data_to_db.py
|
Python
|
gpl-3.0
| 2,140
|
# -*- coding: utf-8 -*-
"""
threatmetrix.tests.__init__
~~~~~~~~~~~~~~~~~~~~~~~~~~~
init file for threatmetrix
"""
import os
os.environ['DJANGO_SETTINGS_MODULE'] = 'threatmetrix.tests.settings'
from .threatmetrix import *
|
CashStar/django-threatmetrix
|
threatmetrix/tests/__init__.py
|
Python
|
bsd-3-clause
| 240
|
import csv
# file_xy is the CSV file with scenic-spot coordinates; file_value is the CSV file with scenic-spot popularity values
file_xy = open(r'J:\四会多规合一\四会景点坐标.csv')
file_value = open(r'J:\四会多规合一\四会景点热度.csv')
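# Assumed CSV layouts, inferred from the column indices used below
# (illustrative only):
#   file_xy:    name, latitude, longitude, popularity
#   file_value: name, popularity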
def nodes_reader(file):
    '''
    Extract x and y coordinates from a csv file (n rows, 2 columns),
    convert them into complex numbers and store them in a frozen set.
    Extended to work with more than 2 columns and to skip empty rows.
    '''
assembly = []
readerxy = csv.reader(file, delimiter=',', skipinitialspace=True)
    filter_num = first_n(file_value)  # Attention: values below this will be omitted.
print('firstn:', filter_num)
for row in readerxy:
if row[0] != '':
latitude = float(row[1])# latitude as Y coordinate
longitude = float(row[2])# longitude as X coordinate
coor = complex(longitude, latitude)
if float(row[3]) >= filter_num:
assembly.append(coor)
print('length:', len(assembly))
return frozenset(assembly)
def first_n(file):
value_list = []
readerv = csv.reader(file, delimiter=',', skipinitialspace=True)
for row in readerv:
if row[1] != 0:
value_list.append(float(row[1]))
value_list.sort()
n = 10
    return value_list[(len(value_list)-1)-n]  # n is the number of scenic spots to pick
test = nodes_reader(file_xy)
print(test)
|
spencerpomme/coconuts-on-fire
|
csv model test.py
|
Python
|
apache-2.0
| 1,468
|
# -*- coding: utf-8 -*-
# This file is part of Shuup.
#
# Copyright (c) 2012-2017, Shoop Commerce Ltd. All rights reserved.
#
# This source code is licensed under the OSL-3.0 license found in the
# LICENSE file in the root directory of this source tree.
from django.utils.translation import ugettext_lazy as _
from shuup.admin.base import Section
from shuup.campaigns.models import BasketCampaign, CatalogCampaign
from shuup.core.models import Shop, ShopProduct
class ProductCampaignsSection(Section):
identifier = "product_campaigns"
name = _("Active Campaigns")
icon = "fa-bullhorn"
template = "shuup/campaigns/admin/_product_campaigns.jinja"
@staticmethod
def visible_for_object(product):
return bool(product.pk)
@staticmethod
def get_context_data(product):
ctx = {}
for shop in Shop.objects.all():
try:
shop_product = product.get_shop_instance(shop)
except ShopProduct.DoesNotExist:
continue
ctx[shop] = {
"basket_campaigns": BasketCampaign.get_for_product(shop_product),
"catalog_campaigns": CatalogCampaign.get_for_product(shop_product)
}
return ctx
|
suutari-ai/shoop
|
shuup/campaigns/admin_module/sections.py
|
Python
|
agpl-3.0
| 1,238
|
#!/usr/bin/env python
# -- coding: utf-8 --
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import logging
import sys
from desktop.lib.django_util import JsonResponse
from desktop.lib.i18n import force_unicode
from metadata.manager_client import ManagerApi
from notebook.models import _get_notebook_api
from kafka.conf import has_kafka_api
from kafka.kafka_client import KafkaApi, KafkaApiException
if sys.version_info[0] > 2:
from django.utils.translation import gettext as _
else:
from django.utils.translation import ugettext as _
LOG = logging.getLogger(__name__)
def error_handler(view_fn):
def decorator(*args, **kwargs):
status = 500
response = {
'message': ''
}
try:
return view_fn(*args, **kwargs)
except KafkaApiException as e:
try:
response['message'] = json.loads(e.message)
except Exception:
response['message'] = force_unicode(e.message)
except Exception as e:
message = force_unicode(e)
response['message'] = message
LOG.exception(message)
return JsonResponse(response, status=status)
return decorator
@error_handler
def list_topics(request):
return JsonResponse({
'status': 0,
'topics': [
{'name': topic} for topic in get_topics(request.user)
]
})
@error_handler
def list_topic(request):
name = request.POST.get('name')
topic = get_topic(name)
topic['name'] = name
topic['type'] = 'topic'
return JsonResponse({
'status': 0,
'topic': topic
})
@error_handler
def create_topic(request):
name = request.POST.get('name')
partitions = request.POST.get('partitions', 1)
replication_factor = request.POST.get('replication_factor', 1)
status = KafkaApi().create_topic(name, partitions, replication_factor)
return JsonResponse({
'status': status,
'topic': {
'name': name,
'partitions': partitions,
'replication_factor': replication_factor
}
})
def get_topics(user):
if has_kafka_api():
return KafkaApi().topics()
else:
data = {
'snippet': {},
'database': 'topics'
}
return [
topic['name']
for topic in _get_notebook_api(user, connector_id=56).autocomplete(**data)['tables_meta']
if not topic['name'].startswith('__')
]
def get_topic_data(user, name):
    data = _get_notebook_api(user, connector_id=56).get_sample_data(snippet={})
    return data
def get_topic(name):
if has_kafka_api():
pass
else:
manager = ManagerApi()
broker_host = manager.get_kafka_brokers().split(',')[0].split(':')[0]
return manager.get_kafka_topics(broker_host)[name]
|
kawamon/hue
|
desktop/libs/kafka/src/kafka/kafka_api.py
|
Python
|
apache-2.0
| 3,371
|
def exists():
return True
def make_incflags(paths, RDirs):
result = []
for path in paths:
if not str(path).startswith("#"):
for rdir in RDirs((path,)):
result.append("-isystem")
result.append(str(rdir))
else:
for rdir in RDirs((path,)):
result.append("-I" + str(rdir))
return result
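# For illustration (hypothetical paths): a plain path such as "/usr/include"
# expands to ["-isystem", "<resolved dir>"], while an in-tree "#src" path
# expands to ["-I<resolved dir>"] once SCons' RDirs has resolved it.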
def generate(env):
if "gcc" in env["TOOLS"]:
env["make_incflags"] = make_incflags
env["INCPREFIX"] = ""
env["_CPPINCFLAGS"] = "$( ${_concat(INCPREFIX, CPPPATH, INCSUFFIX, __env__, lambda x : make_incflags(x, RDirs), TARGET, SOURCE)} $)"
|
alalazo/wesnoth
|
scons/system_include.py
|
Python
|
gpl-2.0
| 647
|
# -*- coding: utf-8 -*-
"""
morphsnakes
===========
This is a Python implementation of the algorithms introduced in the paper
Márquez-Neila, P., Baumela, L., Álvarez, L., "A morphological approach
to curvature-based evolution of curves and surfaces". IEEE Transactions
on Pattern Analysis and Machine Intelligence (PAMI), 2013.
This implementation is intended to be as brief, understandable and self-contained
as possible. It does not include any enhancement to make it fast or efficient.
Any practical implementation of this algorithm should work only over the
neighbor pixels of the 0.5-levelset, not over all the embedding function,
and perhaps should feature multi-threading or GPU capabilities.
The classes MorphGAC and MorphACWE provide most of the functionality of this
module. They implement the Morphological Geodesic Active Contours and the
Morphological Active Contours without Edges, respectively. See the
aforementioned paper for full details.
See test.py for examples of usage.
"""
__author__ = "P. Márquez Neila <p.mneila@upm.es>"
from itertools import cycle
import numpy as np
from scipy import ndimage
from scipy.ndimage import binary_dilation, binary_erosion, \
gaussian_filter, gaussian_gradient_magnitude
class fcycle(object):
def __init__(self, iterable):
"""Call functions from the iterable each time it is called."""
self.funcs = cycle(iterable)
def __call__(self, *args, **kwargs):
f = next(self.funcs)
return f(*args, **kwargs)
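# e.g. fcycle([f, g])(x) returns f(x) on the first call, g(x) on the second,
# then f(x) again, cycling indefinitely.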
# SI and IS operators for 2D and 3D.
_P2 = [np.eye(3), np.array([[0,1,0]]*3), np.flipud(np.eye(3)), np.rot90([[0,1,0]]*3)]
_P3 = [np.zeros((3,3,3)) for i in range(9)]
_P3[0][:,:,1] = 1
_P3[1][:,1,:] = 1
_P3[2][1,:,:] = 1
_P3[3][:,[0,1,2],[0,1,2]] = 1
_P3[4][:,[0,1,2],[2,1,0]] = 1
_P3[5][[0,1,2],:,[0,1,2]] = 1
_P3[6][[0,1,2],:,[2,1,0]] = 1
_P3[7][[0,1,2],[0,1,2],:] = 1
_P3[8][[0,1,2],[2,1,0],:] = 1
_aux = np.zeros((0))
def SI(u):
"""SI operator."""
global _aux
if np.ndim(u) == 2:
P = _P2
elif np.ndim(u) == 3:
P = _P3
else:
raise ValueError("u has an invalid number of dimensions (should be 2 or 3)")
if u.shape != _aux.shape[1:]:
_aux = np.zeros((len(P),) + u.shape)
for i in range(len(P)):
_aux[i] = binary_erosion(u, P[i])
return _aux.max(0)
def IS(u):
"""IS operator."""
global _aux
if np.ndim(u) == 2:
P = _P2
elif np.ndim(u) == 3:
P = _P3
else:
raise ValueError("u has an invalid number of dimensions (should be 2 or 3)")
if u.shape != _aux.shape[1:]:
_aux = np.zeros((len(P),) + u.shape)
for i in range(len(P)):
_aux[i] = binary_dilation(u, P[i])
return _aux.min(0)
# SIoIS operator.
SIoIS = lambda u: SI(IS(u))
ISoSI = lambda u: IS(SI(u))
curvop = fcycle([SIoIS, ISoSI])
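# curvop alternates SIoIS and ISoSI across successive steps; per the paper,
# this alternation approximates the mean curvature smoothing flow.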
# Stopping factors (function g(I) in the paper).
def gborders(img, alpha=1.0, sigma=1.0):
"""Stopping criterion for image borders."""
# The norm of the gradient.
gradnorm = gaussian_gradient_magnitude(img, sigma, mode='constant')
return 1.0/np.sqrt(1.0 + alpha*gradnorm)
def glines(img, sigma=1.0):
"""Stopping criterion for image black lines."""
return gaussian_filter(img, sigma)
class MorphACWE(object):
"""Morphological ACWE based on the Chan-Vese energy functional."""
def __init__(self, data, smoothing=1, lambda1=1, lambda2=1):
"""Create a Morphological ACWE solver.
Parameters
----------
data : ndarray
The image data.
smoothing : scalar
The number of repetitions of the smoothing step (the
curv operator) in each iteration. In other terms,
this is the strength of the smoothing. This is the
parameter µ.
lambda1, lambda2 : scalars
Relative importance of the inside pixels (lambda1)
against the outside pixels (lambda2).
"""
self._u = None
self.smoothing = smoothing
self.lambda1 = lambda1
self.lambda2 = lambda2
self.data = data
def set_levelset(self, u):
self._u = np.double(u)
self._u[u>0] = 1
self._u[u<=0] = 0
levelset = property(lambda self: self._u,
set_levelset,
doc="The level set embedding function (u).")
def step(self):
"""Perform a single step of the morphological Chan-Vese evolution."""
# Assign attributes to local variables for convenience.
u = self._u
if u is None:
raise ValueError("the levelset function is not set (use set_levelset)")
data = self.data
# Determine c0 and c1.
inside = u>0
outside = u<=0
c0 = data[outside].sum() / float(outside.sum())
c1 = data[inside].sum() / float(inside.sum())
# Image attachment.
dres = np.array(np.gradient(u))
abs_dres = np.abs(dres).sum(0)
#aux = abs_dres * (c0 - c1) * (c0 + c1 - 2*data)
aux = abs_dres * (self.lambda1*(data - c1)**2 - self.lambda2*(data - c0)**2)
res = np.copy(u)
res[aux < 0] = 1
res[aux > 0] = 0
# Smoothing.
for i in range(self.smoothing):
res = curvop(res)
self._u = res
def run(self, iterations):
"""Run several iterations of the morphological Chan-Vese method."""
for i in range(iterations):
self.step()
class MorphGAC(object):
"""Morphological GAC based on the Geodesic Active Contours."""
def __init__(self, data, smoothing=1, threshold=0, balloon=0):
"""Create a Morphological GAC solver.
Parameters
----------
data : array-like
The stopping criterion g(I). See functions gborders and glines.
smoothing : scalar
The number of repetitions of the smoothing step in each
iteration. This is the parameter µ.
threshold : scalar
The threshold that determines which areas are affected
by the morphological balloon. This is the parameter θ.
balloon : scalar
The strength of the morphological balloon. This is the parameter ν.
"""
self._u = None
self._v = balloon
self._theta = threshold
self.smoothing = smoothing
self.set_data(data)
def set_levelset(self, u):
self._u = np.double(u)
self._u[u>0] = 1
self._u[u<=0] = 0
def set_balloon(self, v):
self._v = v
self._update_mask()
def set_threshold(self, theta):
self._theta = theta
self._update_mask()
def set_data(self, data):
self._data = data
self._ddata = np.gradient(data)
self._update_mask()
# The structure element for binary dilation and erosion.
self.structure = np.ones((3,)*np.ndim(data))
def _update_mask(self):
"""Pre-compute masks for speed."""
self._threshold_mask = self._data > self._theta
        if self._v:
            self._threshold_mask_v = self._data > self._theta/np.abs(self._v)
        else:
            # No balloon force; the mask is unused, but avoid a divide-by-zero warning.
            self._threshold_mask_v = np.zeros_like(self._data, dtype=bool)
levelset = property(lambda self: self._u,
set_levelset,
doc="The level set embedding function (u).")
data = property(lambda self: self._data,
set_data,
doc="The data that controls the snake evolution (the image or g(I)).")
balloon = property(lambda self: self._v,
set_balloon,
doc="The morphological balloon parameter (ν (nu, not v)).")
threshold = property(lambda self: self._theta,
set_threshold,
doc="The threshold value (θ).")
def step(self):
"""Perform a single step of the morphological snake evolution."""
# Assign attributes to local variables for convenience.
u = self._u
gI = self._data
dgI = self._ddata
theta = self._theta
v = self._v
if u is None:
raise ValueError("the levelset is not set (use set_levelset)")
res = np.copy(u)
# Balloon.
if v > 0:
aux = binary_dilation(u, self.structure)
elif v < 0:
aux = binary_erosion(u, self.structure)
        if v != 0:
res[self._threshold_mask_v] = aux[self._threshold_mask_v]
# Image attachment.
aux = np.zeros_like(res)
dres = np.gradient(res)
for el1, el2 in zip(dgI, dres):
aux += el1*el2
res[aux > 0] = 1
res[aux < 0] = 0
# Smoothing.
for i in range(self.smoothing):
res = curvop(res)
self._u = res
def run(self, iterations):
"""Run several iterations of the morphological snakes method."""
for i in range(iterations):
self.step()
def evolve_visual(msnake, levelset=None, num_iters=20, background=None):
"""
Visual evolution of a morphological snake.
Parameters
----------
msnake : MorphGAC or MorphACWE instance
The morphological snake solver.
levelset : array-like, optional
If given, the levelset of the solver is initialized to this. If not
given, the evolution will use the levelset already set in msnake.
num_iters : int, optional
The number of iterations.
background : array-like, optional
If given, background will be shown behind the contours instead of
msnake.data.
"""
from matplotlib import pyplot as ppl
if levelset is not None:
msnake.levelset = levelset
# Prepare the visual environment.
fig = ppl.gcf()
fig.clf()
ax1 = fig.add_subplot(1,2,1)
if background is None:
ax1.imshow(msnake.data, cmap=ppl.cm.gray)
else:
ax1.imshow(background, cmap=ppl.cm.gray)
ax1.contour(msnake.levelset, [0.5], colors='r')
ax2 = fig.add_subplot(1,2,2)
ax_u = ax2.imshow(msnake.levelset)
ppl.pause(0.001)
# Iterate.
for i in range(num_iters):
# Evolve.
msnake.step()
# Update figure.
del ax1.collections[0]
ax1.contour(msnake.levelset, [0.5], colors='r')
ax_u.set_data(msnake.levelset)
fig.canvas.draw()
#ppl.pause(0.001)
# Return the last levelset.
return msnake.levelset
def evolve_visual3d(msnake, levelset=None, num_iters=20):
"""
Visual evolution of a three-dimensional morphological snake.
Parameters
----------
msnake : MorphGAC or MorphACWE instance
The morphological snake solver.
levelset : array-like, optional
If given, the levelset of the solver is initialized to this. If not
given, the evolution will use the levelset already set in msnake.
num_iters : int, optional
The number of iterations.
"""
from mayavi import mlab
import matplotlib.pyplot as ppl
if levelset is not None:
msnake.levelset = levelset
fig = mlab.gcf()
mlab.clf()
src = mlab.pipeline.scalar_field(msnake.data)
mlab.pipeline.image_plane_widget(src, plane_orientation='x_axes', colormap='gray')
cnt = mlab.contour3d(msnake.levelset, contours=[0.5])
@mlab.animate(ui=True)
def anim():
for i in range(num_iters):
msnake.step()
cnt.mlab_source.scalars = msnake.levelset
print("Iteration %s/%s..." % (i + 1, num_iters))
yield
anim()
mlab.show()
# Return the last levelset.
return msnake.levelset
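# A minimal usage sketch (not part of the original module): the synthetic
# disk image and the parameter values below are illustrative assumptions.
if __name__ == "__main__":
    # Segment a bright disk on a dark background with morphological ACWE.
    X, Y = np.mgrid[0:100, 0:100]
    img = ((X - 50)**2 + (Y - 50)**2 < 20**2).astype(float)
    macwe = MorphACWE(img, smoothing=1, lambda1=1, lambda2=1)
    # Initialize the level set with a circle offset from the true object.
    macwe.levelset = ((X - 40)**2 + (Y - 40)**2 < 10**2).astype(float)
    macwe.run(50)
    print("Final levelset area:", int(macwe.levelset.sum()))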
|
braysia/CellTK
|
celltk/utils/morphsnakes.py
|
Python
|
mit
| 11,905
|
#!/usr/bin/python
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import sys
import matplotlib.pyplot as plt
import csv
import os
if len(sys.argv) < 4 or sys.argv[1] not in ['points', 'result']:
print "Usage: plot-clusters.py (points|result) <src-file> <pdf-file-prefix>"
sys.exit(1)
mode = sys.argv[1]
inFile = sys.argv[2]
outFilePx = sys.argv[3]
inFileName = os.path.splitext(os.path.basename(inFile))[0]
outFile = os.path.join(".", outFilePx+"-plot.pdf")
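# Example invocation (hypothetical file names):
#   python plot-clusters.py points kmeans-points.txt kmeans
# reads kmeans-points.txt and writes ./kmeans-plot.pdf at 600 dpi.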
########### READ DATA
cs = []
xs = []
ys = []
minX = None
maxX = None
minY = None
maxY = None
if mode == 'points':
with open(inFile, 'rb') as file:
for line in file:
# parse data
csvData = line.strip().split(' ')
x = float(csvData[0])
y = float(csvData[1])
if not minX or minX > x:
minX = x
if not maxX or maxX < x:
maxX = x
if not minY or minY > y:
minY = y
if not maxY or maxY < y:
maxY = y
xs.append(x)
ys.append(y)
# plot data
plt.clf()
plt.scatter(xs, ys, s=25, c="#999999", edgecolors='None', alpha=1.0)
plt.ylim([minY,maxY])
plt.xlim([minX,maxX])
elif mode == 'result':
with open(inFile, 'rb') as file:
for line in file:
# parse data
csvData = line.strip().split(' ')
c = int(csvData[0])
x = float(csvData[1])
y = float(csvData[2])
cs.append(c)
xs.append(x)
ys.append(y)
# plot data
plt.clf()
plt.scatter(xs, ys, s=25, c=cs, edgecolors='None', alpha=1.0)
plt.ylim([minY,maxY])
plt.xlim([minX,maxX])
plt.savefig(outFile, dpi=600)
print "\nPlotted file: %s" % outFile
sys.exit(0)
|
aspoman/flink-china-doc
|
quickstart/plotPoints.py
|
Python
|
apache-2.0
| 2,413
|
import math
li=[]
for i in range(3,200,2):
ro=int(math.sqrt(i))
f=1
for j in range(3,ro+1,2):
if(i%j==0):
f=0
break
if(f==1):
li.append(i)
print(li)
print(len(li))
#li=[123]
# Keep only circular primes: every digit rotation of i must also be prime.
# Iterate over a copy, since removing from li while iterating would skip items.
for i in li[:]:
    n = i
    for j in range(len(str(i))):
        r = n % 10
        # move the last digit to the front (one rotation)
        n = (n // 10) + r * (10 ** (len(str(i)) - 1))
        if n not in li:
            li.remove(i)
            break
print(li)
print(len(li))
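# Equivalent rotation test, as a sketch (the helper name is illustrative):
# rotating the digit string directly replaces the modular arithmetic above.
def is_circular(p, primes):
    s = str(p)
    # every rotation of the digit string must itself be in the prime list
    return all(int(s[k:] + s[:k]) in primes for k in range(len(s)))

# e.g. is_circular(197, li) is True when 197, 971 and 719 are all in li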
|
akash-akya/Project-Euler
|
Euler_P35.py
|
Python
|
gpl-2.0
| 366
|
# coding=utf-8
r"""
This code was generated by
\ / _ _ _| _ _
| (_)\/(_)(_|\/| |(/_ v1.0.0
/ /
"""
from tests import IntegrationTestCase
from tests.holodeck import Request
from twilio.base.exceptions import TwilioException
from twilio.http.response import Response
class LocalTestCase(IntegrationTestCase):
def test_list_request(self):
self.holodeck.mock(Response(500, ''))
with self.assertRaises(TwilioException):
self.client.api.v2010.accounts(sid="ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.available_phone_numbers(country_code="US") \
.local.list()
self.holodeck.assert_has_request(Request(
'get',
'https://api.twilio.com/2010-04-01/Accounts/ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/AvailablePhoneNumbers/US/Local.json',
))
def test_read_full_response(self):
self.holodeck.mock(Response(
200,
'''
{
"available_phone_numbers": [
{
"address_requirements": "none",
"beta": false,
"capabilities": {
"mms": true,
"sms": false,
"voice": true
},
"friendly_name": "(808) 925-1571",
"iso_country": "US",
"lata": "834",
"latitude": "19.720000",
"locality": "Hilo",
"longitude": "-155.090000",
"phone_number": "+18089251571",
"postal_code": "96720",
"rate_center": "HILO",
"region": "HI"
}
],
"end": 1,
"first_page_uri": "/2010-04-01/Accounts/ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/AvailablePhoneNumbers/US/Local.json?PageSize=50&Page=0",
"last_page_uri": "/2010-04-01/Accounts/ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/AvailablePhoneNumbers/US/Local.json?PageSize=50&Page=0",
"next_page_uri": null,
"num_pages": 1,
"page": 0,
"page_size": 50,
"previous_page_uri": null,
"start": 0,
"total": 1,
"uri": "/2010-04-01/Accounts/ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/AvailablePhoneNumbers/US/Local.json?PageSize=1"
}
'''
))
actual = self.client.api.v2010.accounts(sid="ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.available_phone_numbers(country_code="US") \
.local.list()
self.assertIsNotNone(actual)
def test_read_empty_response(self):
self.holodeck.mock(Response(
200,
'''
{
"available_phone_numbers": [],
"first_page_uri": "/2010-04-01/Accounts/ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/AvailablePhoneNumbers/US/Local.json?PageSize=50&Page=0",
"last_page_uri": "/2010-04-01/Accounts/ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/AvailablePhoneNumbers/US/Local.json?PageSize=50&Page=0",
"next_page_uri": null,
"num_pages": 1,
"page": 0,
"page_size": 50,
"previous_page_uri": null,
"start": 0,
"total": 1,
"uri": "/2010-04-01/Accounts/ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/AvailablePhoneNumbers/US/Local.json?PageSize=1"
}
'''
))
actual = self.client.api.v2010.accounts(sid="ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.available_phone_numbers(country_code="US") \
.local.list()
self.assertIsNotNone(actual)
|
tysonholub/twilio-python
|
tests/integration/api/v2010/account/available_phone_number/test_local.py
|
Python
|
mit
| 4,003
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import StringIO
import sys
import mock
from oslo_config import cfg
import rtslib
from cinder.cmd import all as cinder_all
from cinder.cmd import api as cinder_api
from cinder.cmd import backup as cinder_backup
from cinder.cmd import manage as cinder_manage
from cinder.cmd import rtstool as cinder_rtstool
from cinder.cmd import scheduler as cinder_scheduler
from cinder.cmd import volume as cinder_volume
from cinder.cmd import volume_usage_audit
from cinder import context
from cinder import test
from cinder import version
CONF = cfg.CONF
class TestCinderApiCmd(test.TestCase):
"""Unit test cases for python modules under cinder/cmd."""
def setUp(self):
super(TestCinderApiCmd, self).setUp()
sys.argv = ['cinder-api']
CONF(sys.argv[1:], project='cinder', version=version.version_string())
def tearDown(self):
super(TestCinderApiCmd, self).tearDown()
@mock.patch('cinder.service.WSGIService')
@mock.patch('cinder.service.process_launcher')
@mock.patch('cinder.rpc.init')
@mock.patch('cinder.utils.monkey_patch')
@mock.patch('oslo_log.log.setup')
def test_main(self, log_setup, monkey_patch, rpc_init, process_launcher,
wsgi_service):
launcher = process_launcher.return_value
server = wsgi_service.return_value
server.workers = mock.sentinel.worker_count
cinder_api.main()
self.assertEqual(CONF.project, 'cinder')
self.assertEqual(CONF.version, version.version_string())
log_setup.assert_called_once_with(CONF, "cinder")
monkey_patch.assert_called_once_with()
rpc_init.assert_called_once_with(CONF)
process_launcher.assert_called_once_with()
wsgi_service.assert_called_once_with('osapi_volume')
launcher.launch_service.assert_called_once_with(server,
workers=server.workers)
launcher.wait.assert_called_once_with()
class TestCinderBackupCmd(test.TestCase):
def setUp(self):
super(TestCinderBackupCmd, self).setUp()
sys.argv = ['cinder-backup']
CONF(sys.argv[1:], project='cinder', version=version.version_string())
def tearDown(self):
super(TestCinderBackupCmd, self).tearDown()
@mock.patch('cinder.service.wait')
@mock.patch('cinder.service.serve')
@mock.patch('cinder.service.Service.create')
@mock.patch('cinder.utils.monkey_patch')
@mock.patch('oslo_log.log.setup')
def test_main(self, log_setup, monkey_patch, service_create, service_serve,
service_wait):
server = service_create.return_value
cinder_backup.main()
self.assertEqual(CONF.project, 'cinder')
self.assertEqual(CONF.version, version.version_string())
log_setup.assert_called_once_with(CONF, "cinder")
monkey_patch.assert_called_once_with()
service_create.assert_called_once_with(binary='cinder-backup')
service_serve.assert_called_once_with(server)
service_wait.assert_called_once_with()
class TestCinderAllCmd(test.TestCase):
def setUp(self):
super(TestCinderAllCmd, self).setUp()
sys.argv = ['cinder-all']
CONF(sys.argv[1:], project='cinder', version=version.version_string())
def tearDown(self):
super(TestCinderAllCmd, self).tearDown()
@mock.patch('cinder.service.Service.create')
@mock.patch('cinder.service.WSGIService')
@mock.patch('cinder.service.process_launcher')
@mock.patch('cinder.utils.monkey_patch')
@mock.patch('oslo_log.log.getLogger')
@mock.patch('oslo_log.log.setup')
def test_main(self, log_setup, get_logger, monkey_patch, process_launcher,
wsgi_service, service_create):
launcher = process_launcher.return_value
server = wsgi_service.return_value
server.workers = mock.sentinel.worker_count
service = service_create.return_value
cinder_all.main()
self.assertEqual(CONF.project, 'cinder')
self.assertEqual(CONF.version, version.version_string())
log_setup.assert_called_once_with(CONF, "cinder")
get_logger.assert_called_once_with('cinder.all')
monkey_patch.assert_called_once_with()
process_launcher.assert_called_once_with()
wsgi_service.assert_called_once_with('osapi_volume')
launcher.launch_service.assert_any_call(server, workers=server.workers)
service_create.assert_has_calls([mock.call(binary='cinder-volume'),
mock.call(binary='cinder-scheduler'),
mock.call(binary='cinder-backup')])
self.assertEqual(3, service_create.call_count)
launcher.launch_service.assert_has_calls([mock.call(service)] * 3)
self.assertEqual(4, launcher.launch_service.call_count)
launcher.wait.assert_called_once_with()
@mock.patch('cinder.service.Service.create')
@mock.patch('cinder.service.WSGIService')
@mock.patch('cinder.service.process_launcher')
@mock.patch('cinder.utils.monkey_patch')
@mock.patch('oslo_log.log.getLogger')
@mock.patch('oslo_log.log.setup')
def test_main_load_osapi_volume_exception(self, log_setup, get_logger,
monkey_patch, process_launcher,
wsgi_service, service_create):
launcher = process_launcher.return_value
server = wsgi_service.return_value
server.workers = mock.sentinel.worker_count
mock_log = get_logger.return_value
for ex in (Exception(), SystemExit()):
launcher.launch_service.side_effect = ex
cinder_all.main()
self.assertEqual(CONF.project, 'cinder')
self.assertEqual(CONF.version, version.version_string())
log_setup.assert_called_once_with(CONF, "cinder")
get_logger.assert_called_once_with('cinder.all')
monkey_patch.assert_called_once_with()
process_launcher.assert_called_once_with()
wsgi_service.assert_called_once_with('osapi_volume')
launcher.launch_service.assert_any_call(server,
workers=server.workers)
self.assertTrue(mock_log.exception.called)
# Reset for the next exception
log_setup.reset_mock()
get_logger.reset_mock()
monkey_patch.reset_mock()
process_launcher.reset_mock()
wsgi_service.reset_mock()
mock_log.reset_mock()
@mock.patch('cinder.service.Service.create')
@mock.patch('cinder.service.WSGIService')
@mock.patch('cinder.service.process_launcher')
@mock.patch('cinder.utils.monkey_patch')
@mock.patch('oslo_log.log.getLogger')
@mock.patch('oslo_log.log.setup')
def test_main_load_binary_exception(self, log_setup, get_logger,
monkey_patch, process_launcher,
wsgi_service, service_create):
launcher = process_launcher.return_value
server = wsgi_service.return_value
server.workers = mock.sentinel.worker_count
service = service_create.return_value
mock_log = get_logger.return_value
def launch_service(*args, **kwargs):
if service in args:
raise Exception()
launcher.launch_service.side_effect = launch_service
cinder_all.main()
self.assertEqual(CONF.project, 'cinder')
self.assertEqual(CONF.version, version.version_string())
log_setup.assert_called_once_with(CONF, "cinder")
get_logger.assert_called_once_with('cinder.all')
monkey_patch.assert_called_once_with()
process_launcher.assert_called_once_with()
wsgi_service.assert_called_once_with('osapi_volume')
launcher.launch_service.assert_any_call(server,
workers=server.workers)
for binary in ['cinder-volume', 'cinder-scheduler', 'cinder-backup']:
service_create.assert_any_call(binary=binary)
launcher.launch_service.assert_called_with(service)
self.assertTrue(mock_log.exception.called)
class TestCinderSchedulerCmd(test.TestCase):
def setUp(self):
super(TestCinderSchedulerCmd, self).setUp()
sys.argv = ['cinder-scheduler']
CONF(sys.argv[1:], project='cinder', version=version.version_string())
def tearDown(self):
super(TestCinderSchedulerCmd, self).tearDown()
@mock.patch('cinder.service.wait')
@mock.patch('cinder.service.serve')
@mock.patch('cinder.service.Service.create')
@mock.patch('cinder.utils.monkey_patch')
@mock.patch('oslo_log.log.setup')
def test_main(self, log_setup, monkey_patch, service_create,
service_serve, service_wait):
server = service_create.return_value
cinder_scheduler.main()
self.assertEqual(CONF.project, 'cinder')
self.assertEqual(CONF.version, version.version_string())
log_setup.assert_called_once_with(CONF, "cinder")
monkey_patch.assert_called_once_with()
service_create.assert_called_once_with(binary='cinder-scheduler')
service_serve.assert_called_once_with(server)
service_wait.assert_called_once_with()
class TestCinderVolumeCmd(test.TestCase):
def setUp(self):
super(TestCinderVolumeCmd, self).setUp()
sys.argv = ['cinder-volume']
CONF(sys.argv[1:], project='cinder', version=version.version_string())
def tearDown(self):
super(TestCinderVolumeCmd, self).tearDown()
@mock.patch('cinder.service.get_launcher')
@mock.patch('cinder.service.Service.create')
@mock.patch('cinder.utils.monkey_patch')
@mock.patch('oslo_log.log.setup')
def test_main(self, log_setup, monkey_patch, service_create,
get_launcher):
CONF.set_override('enabled_backends', None)
launcher = get_launcher.return_value
server = service_create.return_value
cinder_volume.main()
self.assertEqual(CONF.project, 'cinder')
self.assertEqual(CONF.version, version.version_string())
log_setup.assert_called_once_with(CONF, "cinder")
monkey_patch.assert_called_once_with()
get_launcher.assert_called_once_with()
service_create.assert_called_once_with(binary='cinder-volume')
launcher.launch_service.assert_called_once_with(server)
launcher.wait.assert_called_once_with()
@mock.patch('cinder.service.get_launcher')
@mock.patch('cinder.service.Service.create')
@mock.patch('cinder.utils.monkey_patch')
@mock.patch('oslo_log.log.setup')
def test_main_with_backends(self, log_setup, monkey_patch, service_create,
get_launcher):
backends = ['backend1', 'backend2']
CONF.set_override('enabled_backends', backends)
launcher = get_launcher.return_value
cinder_volume.main()
self.assertEqual(CONF.project, 'cinder')
self.assertEqual(CONF.version, version.version_string())
log_setup.assert_called_once_with(CONF, "cinder")
monkey_patch.assert_called_once_with()
get_launcher.assert_called_once_with()
self.assertEqual(len(backends), service_create.call_count)
self.assertEqual(len(backends), launcher.launch_service.call_count)
launcher.wait.assert_called_once_with()
class TestCinderManageCmd(test.TestCase):
def setUp(self):
super(TestCinderManageCmd, self).setUp()
sys.argv = ['cinder-manage']
CONF(sys.argv[1:], project='cinder', version=version.version_string())
def tearDown(self):
super(TestCinderManageCmd, self).tearDown()
@mock.patch('oslo_utils.uuidutils.is_uuid_like')
def test_param2id(self, is_uuid_like):
mock_object_id = mock.MagicMock()
is_uuid_like.return_value = True
object_id = cinder_manage.param2id(mock_object_id)
self.assertEqual(mock_object_id, object_id)
is_uuid_like.assert_called_once_with(mock_object_id)
@mock.patch('oslo_utils.uuidutils.is_uuid_like')
def test_param2id_int_string(self, is_uuid_like):
object_id_str = '10'
is_uuid_like.return_value = False
object_id = cinder_manage.param2id(object_id_str)
self.assertEqual(10, object_id)
is_uuid_like.assert_called_once_with(object_id_str)
@mock.patch('cinder.db.migration.db_sync')
def test_db_commands_sync(self, db_sync):
version = mock.MagicMock()
db_cmds = cinder_manage.DbCommands()
db_cmds.sync(version=version)
db_sync.assert_called_once_with(version)
@mock.patch('oslo_db.sqlalchemy.migration.db_version')
def test_db_commands_version(self, db_version):
db_cmds = cinder_manage.DbCommands()
db_cmds.version()
self.assertEqual(1, db_version.call_count)
@mock.patch('cinder.version.version_string')
def test_versions_commands_list(self, version_string):
version_cmds = cinder_manage.VersionCommands()
version_cmds.list()
version_string.assert_called_once_with()
@mock.patch('cinder.version.version_string')
def test_versions_commands_call(self, version_string):
version_cmds = cinder_manage.VersionCommands()
version_cmds.__call__()
version_string.assert_called_once_with()
@mock.patch('cinder.db.service_get_all')
@mock.patch('cinder.context.get_admin_context')
def test_host_commands_list(self, get_admin_context, service_get_all):
get_admin_context.return_value = mock.sentinel.ctxt
service_get_all.return_value = [{'host': 'fake-host',
'availability_zone': 'fake-az'}]
with mock.patch('sys.stdout', new=StringIO.StringIO()) as fake_out:
expected_out = ("%(host)-25s\t%(zone)-15s\n" %
{'host': 'host', 'zone': 'zone'})
expected_out += ("%(host)-25s\t%(availability_zone)-15s\n" %
{'host': 'fake-host',
'availability_zone': 'fake-az'})
host_cmds = cinder_manage.HostCommands()
host_cmds.list()
get_admin_context.assert_called_once_with()
service_get_all.assert_called_once_with(mock.sentinel.ctxt)
self.assertEqual(expected_out, fake_out.getvalue())
@mock.patch('cinder.db.service_get_all')
@mock.patch('cinder.context.get_admin_context')
def test_host_commands_list_with_zone(self, get_admin_context,
service_get_all):
get_admin_context.return_value = mock.sentinel.ctxt
service_get_all.return_value = [{'host': 'fake-host',
'availability_zone': 'fake-az1'},
{'host': 'fake-host',
'availability_zone': 'fake-az2'}]
with mock.patch('sys.stdout', new=StringIO.StringIO()) as fake_out:
expected_out = ("%(host)-25s\t%(zone)-15s\n" %
{'host': 'host', 'zone': 'zone'})
expected_out += ("%(host)-25s\t%(availability_zone)-15s\n" %
{'host': 'fake-host',
'availability_zone': 'fake-az1'})
host_cmds = cinder_manage.HostCommands()
host_cmds.list(zone='fake-az1')
get_admin_context.assert_called_once_with()
service_get_all.assert_called_once_with(mock.sentinel.ctxt)
self.assertEqual(expected_out, fake_out.getvalue())
@mock.patch('cinder.objects.base.CinderObjectSerializer')
@mock.patch('cinder.rpc.get_client')
@mock.patch('cinder.rpc.init')
@mock.patch('cinder.rpc.initialized', return_value=False)
@mock.patch('oslo_messaging.Target')
def test_volume_commands_init(self, messaging_target, rpc_initialized,
rpc_init, get_client, object_serializer):
CONF.set_override('volume_topic', 'fake-topic')
mock_target = messaging_target.return_value
mock_rpc_client = get_client.return_value
volume_cmds = cinder_manage.VolumeCommands()
rpc_client = volume_cmds.rpc_client()
rpc_initialized.assert_called_once_with()
rpc_init.assert_called_once_with(CONF)
messaging_target.assert_called_once_with(topic='fake-topic')
get_client.assert_called_once_with(mock_target,
serializer=object_serializer())
self.assertEqual(mock_rpc_client, rpc_client)
@mock.patch('cinder.db.volume_get')
@mock.patch('cinder.context.get_admin_context')
@mock.patch('cinder.rpc.get_client')
@mock.patch('cinder.rpc.init')
def test_volume_commands_delete(self, rpc_init, get_client,
get_admin_context, volume_get):
ctxt = context.RequestContext('fake-user', 'fake-project')
get_admin_context.return_value = ctxt
mock_client = mock.MagicMock()
cctxt = mock.MagicMock()
mock_client.prepare.return_value = cctxt
get_client.return_value = mock_client
volume_id = '123'
volume = {'id': volume_id, 'host': 'fake-host', 'status': 'available'}
volume_get.return_value = volume
volume_cmds = cinder_manage.VolumeCommands()
volume_cmds._client = mock_client
volume_cmds.delete(volume_id)
volume_get.assert_called_once_with(ctxt, 123)
mock_client.prepare.assert_called_once_with(server=volume['host'])
cctxt.cast.assert_called_once_with(ctxt, 'delete_volume',
volume_id=volume['id'])
@mock.patch('cinder.db.volume_destroy')
@mock.patch('cinder.db.volume_get')
@mock.patch('cinder.context.get_admin_context')
@mock.patch('cinder.rpc.init')
def test_volume_commands_delete_no_host(self, rpc_init, get_admin_context,
volume_get, volume_destroy):
ctxt = context.RequestContext('fake-user', 'fake-project')
get_admin_context.return_value = ctxt
volume_id = '123'
volume = {'id': volume_id, 'host': None, 'status': 'available'}
volume_get.return_value = volume
with mock.patch('sys.stdout', new=StringIO.StringIO()) as fake_out:
expected_out = ('Volume not yet assigned to host.\n'
'Deleting volume from database and skipping'
' rpc.\n')
volume_cmds = cinder_manage.VolumeCommands()
volume_cmds.delete(volume_id)
get_admin_context.assert_called_once_with()
volume_get.assert_called_once_with(ctxt, 123)
volume_destroy.assert_called_once_with(ctxt, 123)
self.assertEqual(expected_out, fake_out.getvalue())
@mock.patch('cinder.db.volume_destroy')
@mock.patch('cinder.db.volume_get')
@mock.patch('cinder.context.get_admin_context')
@mock.patch('cinder.rpc.init')
def test_volume_commands_delete_volume_in_use(self, rpc_init,
get_admin_context,
volume_get, volume_destroy):
ctxt = context.RequestContext('fake-user', 'fake-project')
get_admin_context.return_value = ctxt
volume_id = '123'
volume = {'id': volume_id, 'host': 'fake-host', 'status': 'in-use'}
volume_get.return_value = volume
with mock.patch('sys.stdout', new=StringIO.StringIO()) as fake_out:
expected_out = ('Volume is in-use.\n'
'Detach volume from instance and then try'
' again.\n')
volume_cmds = cinder_manage.VolumeCommands()
volume_cmds.delete(volume_id)
volume_get.assert_called_once_with(ctxt, 123)
self.assertEqual(expected_out, fake_out.getvalue())
def test_config_commands_list(self):
with mock.patch('sys.stdout', new=StringIO.StringIO()) as fake_out:
expected_out = ''
for key, value in CONF.iteritems():
expected_out += '%s = %s' % (key, value) + '\n'
config_cmds = cinder_manage.ConfigCommands()
config_cmds.list()
self.assertEqual(expected_out, fake_out.getvalue())
def test_config_commands_list_param(self):
with mock.patch('sys.stdout', new=StringIO.StringIO()) as fake_out:
CONF.set_override('host', 'fake')
expected_out = 'host = fake\n'
config_cmds = cinder_manage.ConfigCommands()
config_cmds.list(param='host')
self.assertEqual(expected_out, fake_out.getvalue())
def test_get_log_commands_no_errors(self):
with mock.patch('sys.stdout', new=StringIO.StringIO()) as fake_out:
CONF.set_override('log_dir', None)
expected_out = 'No errors in logfiles!\n'
get_log_cmds = cinder_manage.GetLogCommands()
get_log_cmds.errors()
self.assertEqual(expected_out, fake_out.getvalue())
@mock.patch('__builtin__.open')
@mock.patch('os.listdir')
def test_get_log_commands_errors(self, listdir, open):
CONF.set_override('log_dir', 'fake-dir')
listdir.return_value = ['fake-error.log']
with mock.patch('sys.stdout', new=StringIO.StringIO()) as fake_out:
open.return_value = StringIO.StringIO(
'[ ERROR ] fake-error-message')
expected_out = ('fake-dir/fake-error.log:-\n'
'Line 1 : [ ERROR ] fake-error-message\n')
get_log_cmds = cinder_manage.GetLogCommands()
get_log_cmds.errors()
self.assertEqual(expected_out, fake_out.getvalue())
open.assert_called_once_with('fake-dir/fake-error.log', 'r')
listdir.assert_called_once_with(CONF.log_dir)
@mock.patch('__builtin__.open')
@mock.patch('os.path.exists')
def test_get_log_commands_syslog_no_log_file(self, path_exists, open):
path_exists.return_value = False
get_log_cmds = cinder_manage.GetLogCommands()
exit = self.assertRaises(SystemExit, get_log_cmds.syslog)
self.assertEqual(exit.code, 1)
path_exists.assert_any_call('/var/log/syslog')
path_exists.assert_any_call('/var/log/messages')
@mock.patch('cinder.db.backup_get_all')
@mock.patch('cinder.context.get_admin_context')
def test_backup_commands_list(self, get_admin_context, backup_get_all):
ctxt = context.RequestContext('fake-user', 'fake-project')
get_admin_context.return_value = ctxt
backup = {'id': 1,
'user_id': 'fake-user-id',
'project_id': 'fake-project-id',
'host': 'fake-host',
'display_name': 'fake-display-name',
'container': 'fake-container',
'status': 'fake-status',
'size': 123,
'object_count': 1}
backup_get_all.return_value = [backup]
with mock.patch('sys.stdout', new=StringIO.StringIO()) as fake_out:
hdr = ('%-32s\t%-32s\t%-32s\t%-24s\t%-24s\t%-12s\t%-12s\t%-12s'
'\t%-12s')
header = hdr % ('ID',
'User ID',
'Project ID',
'Host',
'Name',
'Container',
'Status',
'Size',
'Object Count')
res = ('%-32s\t%-32s\t%-32s\t%-24s\t%-24s\t%-12s\t%-12s\t%-12d'
'\t%-12s')
resource = res % (backup['id'],
backup['user_id'],
backup['project_id'],
backup['host'],
backup['display_name'],
backup['container'],
backup['status'],
backup['size'],
1)
expected_out = header + '\n' + resource + '\n'
backup_cmds = cinder_manage.BackupCommands()
backup_cmds.list()
get_admin_context.assert_called_once_with()
backup_get_all.assert_called_once_with(ctxt)
self.assertEqual(expected_out, fake_out.getvalue())
@mock.patch('cinder.utils.service_is_up')
@mock.patch('cinder.db.service_get_all')
@mock.patch('cinder.context.get_admin_context')
def test_service_commands_list(self, get_admin_context, service_get_all,
service_is_up):
ctxt = context.RequestContext('fake-user', 'fake-project')
get_admin_context.return_value = ctxt
service = {'binary': 'cinder-binary',
'host': 'fake-host.fake-domain',
'availability_zone': 'fake-zone',
'updated_at': '2014-06-30 11:22:33',
'disabled': False}
service_get_all.return_value = [service]
service_is_up.return_value = True
with mock.patch('sys.stdout', new=StringIO.StringIO()) as fake_out:
format = "%-16s %-36s %-16s %-10s %-5s %-10s"
print_format = format % ('Binary',
'Host',
'Zone',
'Status',
'State',
'Updated At')
service_format = format % (service['binary'],
service['host'].partition('.')[0],
service['availability_zone'],
'enabled',
':-)',
service['updated_at'])
expected_out = print_format + '\n' + service_format + '\n'
service_cmds = cinder_manage.ServiceCommands()
service_cmds.list()
self.assertEqual(expected_out, fake_out.getvalue())
get_admin_context.assert_called_with()
service_get_all.assert_called_with(ctxt)
service_is_up.assert_called_with(service)
@mock.patch('oslo_config.cfg.ConfigOpts.register_cli_opt')
def test_main_argv_lt_2(self, register_cli_opt):
script_name = 'cinder-manage'
sys.argv = [script_name]
CONF(sys.argv[1:], project='cinder', version=version.version_string())
exit = self.assertRaises(SystemExit, cinder_manage.main)
self.assertTrue(register_cli_opt.called)
self.assertEqual(exit.code, 2)
@mock.patch('oslo_config.cfg.ConfigOpts.__call__')
@mock.patch('oslo_log.log.setup')
@mock.patch('oslo_config.cfg.ConfigOpts.register_cli_opt')
def test_main_sudo_failed(self, register_cli_opt, log_setup,
config_opts_call):
script_name = 'cinder-manage'
sys.argv = [script_name, 'fake_category', 'fake_action']
config_opts_call.side_effect = cfg.ConfigFilesNotFoundError(
mock.sentinel._namespace)
exit = self.assertRaises(SystemExit, cinder_manage.main)
self.assertTrue(register_cli_opt.called)
config_opts_call.assert_called_once_with(
sys.argv[1:], project='cinder',
version=version.version_string())
self.assertFalse(log_setup.called)
self.assertEqual(exit.code, 2)
@mock.patch('oslo_config.cfg.ConfigOpts.__call__')
@mock.patch('oslo_config.cfg.ConfigOpts.register_cli_opt')
def test_main(self, register_cli_opt, config_opts_call):
script_name = 'cinder-manage'
sys.argv = [script_name, 'config', 'list']
action_fn = mock.MagicMock()
CONF.category = mock.MagicMock(action_fn=action_fn)
cinder_manage.main()
self.assertTrue(register_cli_opt.called)
config_opts_call.assert_called_once_with(
sys.argv[1:], project='cinder', version=version.version_string())
self.assertTrue(action_fn.called)
class TestCinderRtstoolCmd(test.TestCase):
def setUp(self):
super(TestCinderRtstoolCmd, self).setUp()
sys.argv = ['cinder-rtstool']
CONF(sys.argv[1:], project='cinder', version=version.version_string())
def tearDown(self):
super(TestCinderRtstoolCmd, self).tearDown()
@mock.patch('rtslib.root.RTSRoot')
def test_create_rtslib_error(self, rtsroot):
rtsroot.side_effect = rtslib.utils.RTSLibError()
self.assertRaises(rtslib.utils.RTSLibError, cinder_rtstool.create,
mock.sentinel.backing_device,
mock.sentinel.name,
mock.sentinel.userid,
mock.sentinel.password,
mock.sentinel.iser_enabled)
def _test_create_rtslib_error_network_portal(self, ip):
with mock.patch('rtslib.NetworkPortal') as network_portal, \
mock.patch('rtslib.LUN') as lun, \
mock.patch('rtslib.TPG') as tpg, \
mock.patch('rtslib.FabricModule') as fabric_module, \
mock.patch('rtslib.Target') as target, \
mock.patch('rtslib.BlockStorageObject') as \
block_storage_object, \
mock.patch('rtslib.root.RTSRoot') as rts_root:
root_new = mock.MagicMock(storage_objects=mock.MagicMock())
rts_root.return_value = root_new
block_storage_object.return_value = mock.sentinel.so_new
target.return_value = mock.sentinel.target_new
fabric_module.return_value = mock.sentinel.fabric_new
tpg_new = tpg.return_value
lun.return_value = mock.sentinel.lun_new
if ip == '0.0.0.0':
network_portal.side_effect = rtslib.utils.RTSLibError()
self.assertRaises(rtslib.utils.RTSLibError,
cinder_rtstool.create,
mock.sentinel.backing_device,
mock.sentinel.name,
mock.sentinel.userid,
mock.sentinel.password,
mock.sentinel.iser_enabled)
else:
cinder_rtstool.create(mock.sentinel.backing_device,
mock.sentinel.name,
mock.sentinel.userid,
mock.sentinel.password,
mock.sentinel.iser_enabled)
rts_root.assert_called_once_with()
block_storage_object.assert_called_once_with(
name=mock.sentinel.name, dev=mock.sentinel.backing_device)
target.assert_called_once_with(mock.sentinel.fabric_new,
mock.sentinel.name, 'create')
fabric_module.assert_called_once_with('iscsi')
tpg.assert_called_once_with(mock.sentinel.target_new,
mode='create')
tpg_new.set_attribute.assert_called_once_with('authentication',
'1')
lun.assert_called_once_with(tpg_new,
storage_object=mock.sentinel.so_new)
self.assertEqual(1, tpg_new.enable)
network_portal.assert_any_call(tpg_new, ip, 3260,
mode='any')
if ip == '::0':
network_portal.assert_any_call(tpg_new, ip, 3260, mode='any')
def test_create_rtslib_error_network_portal_ipv4(self):
self._test_create_rtslib_error_network_portal('0.0.0.0')
def test_create_rtslib_error_network_portal_ipv6(self):
self._test_create_rtslib_error_network_portal('::0')
def _test_create(self, ip):
with mock.patch('rtslib.NetworkPortal') as network_portal, \
mock.patch('rtslib.LUN') as lun, \
mock.patch('rtslib.TPG') as tpg, \
mock.patch('rtslib.FabricModule') as fabric_module, \
mock.patch('rtslib.Target') as target, \
mock.patch('rtslib.BlockStorageObject') as \
block_storage_object, \
mock.patch('rtslib.root.RTSRoot') as rts_root:
root_new = mock.MagicMock(storage_objects=mock.MagicMock())
rts_root.return_value = root_new
block_storage_object.return_value = mock.sentinel.so_new
target.return_value = mock.sentinel.target_new
fabric_module.return_value = mock.sentinel.fabric_new
tpg_new = tpg.return_value
lun.return_value = mock.sentinel.lun_new
def network_portal_exception(*args, **kwargs):
if set([tpg_new, '::0', 3260]).issubset(list(args)):
raise rtslib.utils.RTSLibError()
else:
pass
cinder_rtstool.create(mock.sentinel.backing_device,
mock.sentinel.name,
mock.sentinel.userid,
mock.sentinel.password,
mock.sentinel.iser_enabled)
rts_root.assert_called_once_with()
block_storage_object.assert_called_once_with(
name=mock.sentinel.name, dev=mock.sentinel.backing_device)
target.assert_called_once_with(mock.sentinel.fabric_new,
mock.sentinel.name, 'create')
fabric_module.assert_called_once_with('iscsi')
tpg.assert_called_once_with(mock.sentinel.target_new,
mode='create')
tpg_new.set_attribute.assert_called_once_with('authentication',
'1')
lun.assert_called_once_with(tpg_new,
storage_object=mock.sentinel.so_new)
self.assertEqual(1, tpg_new.enable)
network_portal.assert_any_call(tpg_new, ip, 3260,
mode='any')
if ip == '::0':
network_portal.assert_any_call(tpg_new, ip, 3260, mode='any')
def test_create_ipv4(self):
self._test_create('0.0.0.0')
def test_create_ipv6(self):
self._test_create('::0')
@mock.patch.object(cinder_rtstool, 'rtslib', autospec=True)
def test_create_ips_and_port(self, mock_rtslib):
port = 3261
ips = ['ip1', 'ip2', 'ip3']
mock_rtslib.BlockStorageObject.return_value = mock.sentinel.bso
mock_rtslib.Target.return_value = mock.sentinel.target_new
mock_rtslib.FabricModule.return_value = mock.sentinel.iscsi_fabric
tpg_new = mock_rtslib.TPG.return_value
cinder_rtstool.create(mock.sentinel.backing_device,
mock.sentinel.name,
mock.sentinel.userid,
mock.sentinel.password,
mock.sentinel.iser_enabled,
portals_ips=ips,
portals_port=port)
mock_rtslib.Target.assert_called_once_with(mock.sentinel.iscsi_fabric,
mock.sentinel.name,
'create')
mock_rtslib.TPG.assert_called_once_with(mock.sentinel.target_new,
mode='create')
mock_rtslib.LUN.assert_called_once_with(
tpg_new,
storage_object=mock.sentinel.bso)
mock_rtslib.NetworkPortal.assert_has_calls(
map(lambda ip: mock.call(tpg_new, ip, port, mode='any'), ips),
any_order=True
)
@mock.patch('rtslib.root.RTSRoot')
def test_add_initiator_rtslib_error(self, rtsroot):
rtsroot.side_effect = rtslib.utils.RTSLibError()
self.assertRaises(rtslib.utils.RTSLibError,
cinder_rtstool.add_initiator,
mock.sentinel.target_iqn,
mock.sentinel.initiator_iqn,
mock.sentinel.userid,
mock.sentinel.password)
@mock.patch('rtslib.root.RTSRoot')
def test_add_initiator_rtstool_error(self, rtsroot):
rtsroot.targets.return_value = {}
self.assertRaises(cinder_rtstool.RtstoolError,
cinder_rtstool.add_initiator,
mock.sentinel.target_iqn,
mock.sentinel.initiator_iqn,
mock.sentinel.userid,
mock.sentinel.password)
@mock.patch('rtslib.MappedLUN')
@mock.patch('rtslib.NodeACL')
@mock.patch('rtslib.root.RTSRoot')
def test_add_initiator_acl_exists(self, rtsroot, node_acl, mapped_lun):
target_iqn = mock.MagicMock()
target_iqn.tpgs.return_value = \
[{'node_acls': mock.sentinel.initiator_iqn}]
acl = mock.MagicMock(node_wwn=mock.sentinel.initiator_iqn)
tpg = mock.MagicMock(node_acls=[acl])
tpgs = mock.MagicMock()
tpgs.next.return_value = tpg
target = mock.MagicMock(tpgs=tpgs, wwn=target_iqn)
rtsroot.return_value = mock.MagicMock(targets=[target])
cinder_rtstool.add_initiator(target_iqn,
mock.sentinel.initiator_iqn,
mock.sentinel.userid,
mock.sentinel.password)
self.assertFalse(node_acl.called)
self.assertFalse(mapped_lun.called)
@mock.patch('rtslib.MappedLUN')
@mock.patch('rtslib.NodeACL')
@mock.patch('rtslib.root.RTSRoot')
def test_add_initiator(self, rtsroot, node_acl, mapped_lun):
target_iqn = mock.MagicMock()
target_iqn.tpgs.return_value = \
[{'node_acls': mock.sentinel.initiator_iqn}]
tpg = mock.MagicMock()
target = mock.MagicMock(tpgs=tpg, wwn=target_iqn)
rtsroot.return_value = mock.MagicMock(targets=[target])
acl_new = mock.MagicMock(chap_userid=mock.sentinel.userid,
chap_password=mock.sentinel.password)
node_acl.return_value = acl_new
cinder_rtstool.add_initiator(target_iqn,
mock.sentinel.initiator_iqn,
mock.sentinel.userid,
mock.sentinel.password)
node_acl.assert_called_once_with(tpg.next(),
mock.sentinel.initiator_iqn,
mode='create')
mapped_lun.assert_called_once_with(acl_new, 0, tpg_lun=0)
@mock.patch('rtslib.root.RTSRoot')
def test_get_targets(self, rtsroot):
target = mock.MagicMock()
target.dump.return_value = {'wwn': 'fake-wwn'}
rtsroot.return_value = mock.MagicMock(targets=[target])
with mock.patch('sys.stdout', new=StringIO.StringIO()) as fake_out:
cinder_rtstool.get_targets()
self.assertEqual(str(target.wwn), fake_out.getvalue().strip())
@mock.patch('rtslib.root.RTSRoot')
def test_delete(self, rtsroot):
target = mock.MagicMock(wwn=mock.sentinel.iqn)
storage_object = mock.MagicMock()
name = mock.PropertyMock(return_value=mock.sentinel.iqn)
type(storage_object).name = name
rtsroot.return_value = mock.MagicMock(
targets=[target], storage_objects=[storage_object])
cinder_rtstool.delete(mock.sentinel.iqn)
target.delete.assert_called_once_with()
storage_object.delete.assert_called_once_with()
@mock.patch.object(cinder_rtstool, 'rtslib', autospec=True)
def test_save(self, mock_rtslib):
filename = mock.sentinel.filename
cinder_rtstool.save_to_file(filename)
rtsroot = mock_rtslib.root.RTSRoot
rtsroot.assert_called_once_with()
rtsroot.return_value.save_to_file.assert_called_once_with(filename)
def test_usage(self):
exit = self.assertRaises(SystemExit, cinder_rtstool.usage)
self.assertEqual(exit.code, 1)
@mock.patch('cinder.cmd.rtstool.usage')
def test_main_argc_lt_2(self, usage):
usage.side_effect = SystemExit(1)
sys.argv = ['cinder-rtstool']
        exit = self.assertRaises(SystemExit, cinder_rtstool.main)
self.assertTrue(usage.called)
self.assertEqual(exit.code, 1)
def test_main_create_argv_lt_6(self):
sys.argv = ['cinder-rtstool', 'create']
self._test_main_check_argv()
def test_main_create_argv_gt_7(self):
sys.argv = ['cinder-rtstool', 'create', 'fake-arg1', 'fake-arg2',
'fake-arg3', 'fake-arg4', 'fake-arg5', 'fake-arg6']
self._test_main_check_argv()
def test_main_add_initiator_argv_lt_6(self):
sys.argv = ['cinder-rtstool', 'add-initiator']
self._test_main_check_argv()
def test_main_delete_argv_lt_3(self):
sys.argv = ['cinder-rtstool', 'delete']
self._test_main_check_argv()
def test_main_no_action(self):
sys.argv = ['cinder-rtstool']
self._test_main_check_argv()
def _test_main_check_argv(self):
        with mock.patch('cinder.cmd.rtstool.usage') as usage:
            usage.side_effect = SystemExit(1)
exit = self.assertRaises(SystemExit, cinder_rtstool.main)
self.assertTrue(usage.called)
self.assertEqual(exit.code, 1)
@mock.patch('cinder.cmd.rtstool.save_to_file')
def test_main_save(self, mock_save):
sys.argv = ['cinder-rtstool',
'save']
rc = cinder_rtstool.main()
mock_save.assert_called_once_with(None)
self.assertEqual(0, rc)
@mock.patch('cinder.cmd.rtstool.save_to_file')
def test_main_save_with_file(self, mock_save):
sys.argv = ['cinder-rtstool',
'save',
mock.sentinel.filename]
rc = cinder_rtstool.main()
mock_save.assert_called_once_with(mock.sentinel.filename)
self.assertEqual(0, rc)
def test_main_create(self):
with mock.patch('cinder.cmd.rtstool.create') as create:
sys.argv = ['cinder-rtstool',
'create',
mock.sentinel.backing_device,
mock.sentinel.name,
mock.sentinel.userid,
mock.sentinel.password,
mock.sentinel.iser_enabled,
str(mock.sentinel.initiator_iqns)]
rc = cinder_rtstool.main()
create.assert_called_once_with(
mock.sentinel.backing_device,
mock.sentinel.name,
mock.sentinel.userid,
mock.sentinel.password,
mock.sentinel.iser_enabled,
initiator_iqns=str(mock.sentinel.initiator_iqns))
self.assertEqual(0, rc)
@mock.patch('cinder.cmd.rtstool.create')
def test_main_create_ips_and_port(self, mock_create):
sys.argv = ['cinder-rtstool',
'create',
mock.sentinel.backing_device,
mock.sentinel.name,
mock.sentinel.userid,
mock.sentinel.password,
mock.sentinel.iser_enabled,
str(mock.sentinel.initiator_iqns),
'-p3261',
'-aip1,ip2,ip3']
rc = cinder_rtstool.main()
mock_create.assert_called_once_with(
mock.sentinel.backing_device,
mock.sentinel.name,
mock.sentinel.userid,
mock.sentinel.password,
mock.sentinel.iser_enabled,
initiator_iqns=str(mock.sentinel.initiator_iqns),
portals_ips=['ip1', 'ip2', 'ip3'],
portals_port=3261)
self.assertEqual(0, rc)
def test_main_add_initiator(self):
with mock.patch('cinder.cmd.rtstool.add_initiator') as add_initiator:
sys.argv = ['cinder-rtstool',
'add-initiator',
mock.sentinel.target_iqn,
mock.sentinel.userid,
mock.sentinel.password,
mock.sentinel.initiator_iqns]
rc = cinder_rtstool.main()
add_initiator.assert_called_once_with(
mock.sentinel.target_iqn, mock.sentinel.initiator_iqns,
mock.sentinel.userid, mock.sentinel.password)
self.assertEqual(0, rc)
def test_main_get_targets(self):
with mock.patch('cinder.cmd.rtstool.get_targets') as get_targets:
sys.argv = ['cinder-rtstool', 'get-targets']
rc = cinder_rtstool.main()
get_targets.assert_called_once_with()
self.assertEqual(0, rc)
def test_main_delete(self):
with mock.patch('cinder.cmd.rtstool.delete') as delete:
sys.argv = ['cinder-rtstool', 'delete', mock.sentinel.iqn]
rc = cinder_rtstool.main()
delete.assert_called_once_with(mock.sentinel.iqn)
self.assertEqual(0, rc)
def test_main_verify(self):
with mock.patch('cinder.cmd.rtstool.verify_rtslib') as verify_rtslib:
sys.argv = ['cinder-rtstool', 'verify']
rc = cinder_rtstool.main()
verify_rtslib.assert_called_once_with()
self.assertEqual(0, rc)
class TestCinderVolumeUsageAuditCmd(test.TestCase):
def setUp(self):
super(TestCinderVolumeUsageAuditCmd, self).setUp()
sys.argv = ['cinder-volume-usage-audit']
CONF(sys.argv[1:], project='cinder', version=version.version_string())
def tearDown(self):
super(TestCinderVolumeUsageAuditCmd, self).tearDown()
@mock.patch('cinder.utils.last_completed_audit_period')
@mock.patch('cinder.rpc.init')
@mock.patch('cinder.version.version_string')
@mock.patch('oslo_log.log.getLogger')
@mock.patch('oslo_log.log.setup')
@mock.patch('cinder.context.get_admin_context')
def test_main_time_error(self, get_admin_context, log_setup, get_logger,
version_string, rpc_init,
last_completed_audit_period):
CONF.set_override('start_time', '2014-01-01 01:00:00')
CONF.set_override('end_time', '2013-01-01 01:00:00')
last_completed_audit_period.return_value = (mock.sentinel.begin,
mock.sentinel.end)
exit = self.assertRaises(SystemExit, volume_usage_audit.main)
get_admin_context.assert_called_once_with()
self.assertEqual(CONF.project, 'cinder')
self.assertEqual(CONF.version, version.version_string())
log_setup.assert_called_once_with(CONF, "cinder")
get_logger.assert_called_once_with('cinder')
self.assertEqual(exit.code, -1)
rpc_init.assert_called_once_with(CONF)
last_completed_audit_period.assert_called_once_with()
@mock.patch('cinder.volume.utils.notify_about_volume_usage')
@mock.patch('cinder.db.volume_get_active_by_window')
@mock.patch('cinder.utils.last_completed_audit_period')
@mock.patch('cinder.rpc.init')
@mock.patch('cinder.version.version_string')
@mock.patch('oslo_log.log.getLogger')
@mock.patch('oslo_log.log.setup')
@mock.patch('cinder.context.get_admin_context')
def test_main_send_create_volume_error(self, get_admin_context, log_setup,
get_logger, version_string,
rpc_init,
last_completed_audit_period,
volume_get_active_by_window,
notify_about_volume_usage):
CONF.set_override('send_actions', True)
CONF.set_override('start_time', '2014-01-01 01:00:00')
CONF.set_override('end_time', '2014-02-02 02:00:00')
begin = datetime.datetime(2014, 1, 1, 1, 0)
end = datetime.datetime(2014, 2, 2, 2, 0)
ctxt = context.RequestContext('fake-user', 'fake-project')
get_admin_context.return_value = ctxt
last_completed_audit_period.return_value = (begin, end)
volume1_created = datetime.datetime(2014, 1, 1, 2, 0)
volume1_deleted = datetime.datetime(2014, 1, 1, 3, 0)
volume1 = mock.MagicMock(id='1', project_id='fake-project',
created_at=volume1_created,
deleted_at=volume1_deleted)
volume_get_active_by_window.return_value = [volume1]
extra_info = {
'audit_period_beginning': str(begin),
'audit_period_ending': str(end),
}
local_extra_info = {
'audit_period_beginning': str(volume1.created_at),
'audit_period_ending': str(volume1.created_at),
}
def _notify_about_volume_usage(*args, **kwargs):
if 'create.end' in args:
raise Exception()
else:
pass
notify_about_volume_usage.side_effect = _notify_about_volume_usage
volume_usage_audit.main()
get_admin_context.assert_called_once_with()
self.assertEqual(CONF.project, 'cinder')
self.assertEqual(CONF.version, version.version_string())
log_setup.assert_called_once_with(CONF, "cinder")
get_logger.assert_called_once_with('cinder')
rpc_init.assert_called_once_with(CONF)
last_completed_audit_period.assert_called_once_with()
volume_get_active_by_window.assert_called_once_with(ctxt, begin, end)
notify_about_volume_usage.assert_any_call(ctxt, volume1, 'exists',
extra_usage_info=extra_info)
notify_about_volume_usage.assert_any_call(
ctxt, volume1, 'create.start', extra_usage_info=local_extra_info)
notify_about_volume_usage.assert_any_call(
ctxt, volume1, 'create.end', extra_usage_info=local_extra_info)
@mock.patch('cinder.volume.utils.notify_about_volume_usage')
@mock.patch('cinder.db.volume_get_active_by_window')
@mock.patch('cinder.utils.last_completed_audit_period')
@mock.patch('cinder.rpc.init')
@mock.patch('cinder.version.version_string')
@mock.patch('oslo_log.log.getLogger')
@mock.patch('oslo_log.log.setup')
@mock.patch('cinder.context.get_admin_context')
def test_main_send_delete_volume_error(self, get_admin_context, log_setup,
get_logger, version_string,
rpc_init,
last_completed_audit_period,
volume_get_active_by_window,
notify_about_volume_usage):
CONF.set_override('send_actions', True)
CONF.set_override('start_time', '2014-01-01 01:00:00')
CONF.set_override('end_time', '2014-02-02 02:00:00')
begin = datetime.datetime(2014, 1, 1, 1, 0)
end = datetime.datetime(2014, 2, 2, 2, 0)
ctxt = context.RequestContext('fake-user', 'fake-project')
get_admin_context.return_value = ctxt
last_completed_audit_period.return_value = (begin, end)
volume1_created = datetime.datetime(2014, 1, 1, 2, 0)
volume1_deleted = datetime.datetime(2014, 1, 1, 3, 0)
volume1 = mock.MagicMock(id='1', project_id='fake-project',
created_at=volume1_created,
deleted_at=volume1_deleted)
volume_get_active_by_window.return_value = [volume1]
extra_info = {
'audit_period_beginning': str(begin),
'audit_period_ending': str(end),
}
local_extra_info_create = {
'audit_period_beginning': str(volume1.created_at),
'audit_period_ending': str(volume1.created_at),
}
local_extra_info_delete = {
'audit_period_beginning': str(volume1.deleted_at),
'audit_period_ending': str(volume1.deleted_at),
}
def _notify_about_volume_usage(*args, **kwargs):
if 'delete.end' in args:
raise Exception()
else:
pass
notify_about_volume_usage.side_effect = _notify_about_volume_usage
volume_usage_audit.main()
get_admin_context.assert_called_once_with()
self.assertEqual(CONF.project, 'cinder')
self.assertEqual(CONF.version, version.version_string())
log_setup.assert_called_once_with(CONF, "cinder")
get_logger.assert_called_once_with('cinder')
rpc_init.assert_called_once_with(CONF)
last_completed_audit_period.assert_called_once_with()
volume_get_active_by_window.assert_called_once_with(ctxt, begin, end)
notify_about_volume_usage.assert_any_call(
ctxt, volume1, 'exists', extra_usage_info=extra_info)
notify_about_volume_usage.assert_any_call(
ctxt, volume1, 'create.start',
extra_usage_info=local_extra_info_create)
notify_about_volume_usage.assert_any_call(
ctxt, volume1, 'create.end',
extra_usage_info=local_extra_info_create)
notify_about_volume_usage.assert_any_call(
ctxt, volume1, 'delete.start',
extra_usage_info=local_extra_info_delete)
notify_about_volume_usage.assert_any_call(
ctxt, volume1, 'delete.end',
extra_usage_info=local_extra_info_delete)
@mock.patch('cinder.volume.utils.notify_about_snapshot_usage')
@mock.patch('cinder.db.snapshot_get_active_by_window')
@mock.patch('cinder.volume.utils.notify_about_volume_usage')
@mock.patch('cinder.db.volume_get_active_by_window')
@mock.patch('cinder.utils.last_completed_audit_period')
@mock.patch('cinder.rpc.init')
@mock.patch('cinder.version.version_string')
@mock.patch('oslo_log.log.getLogger')
@mock.patch('oslo_log.log.setup')
@mock.patch('cinder.context.get_admin_context')
def test_main_send_snapshot_error(self, get_admin_context,
log_setup, get_logger,
version_string, rpc_init,
last_completed_audit_period,
volume_get_active_by_window,
notify_about_volume_usage,
snapshot_get_active_by_window,
notify_about_snapshot_usage):
CONF.set_override('send_actions', True)
CONF.set_override('start_time', '2014-01-01 01:00:00')
CONF.set_override('end_time', '2014-02-02 02:00:00')
begin = datetime.datetime(2014, 1, 1, 1, 0)
end = datetime.datetime(2014, 2, 2, 2, 0)
ctxt = context.RequestContext('fake-user', 'fake-project')
get_admin_context.return_value = ctxt
last_completed_audit_period.return_value = (begin, end)
snapshot1_created = datetime.datetime(2014, 1, 1, 2, 0)
snapshot1_deleted = datetime.datetime(2014, 1, 1, 3, 0)
snapshot1 = mock.MagicMock(id='1', project_id='fake-project',
created_at=snapshot1_created,
deleted_at=snapshot1_deleted)
volume_get_active_by_window.return_value = []
snapshot_get_active_by_window.return_value = [snapshot1]
extra_info = {
'audit_period_beginning': str(begin),
'audit_period_ending': str(end),
}
local_extra_info_create = {
'audit_period_beginning': str(snapshot1.created_at),
'audit_period_ending': str(snapshot1.created_at),
}
local_extra_info_delete = {
'audit_period_beginning': str(snapshot1.deleted_at),
'audit_period_ending': str(snapshot1.deleted_at),
}
def _notify_about_snapshot_usage(*args, **kwargs):
    # Every snapshot notification raises, but the audit loop is
    # expected to carry on rather than abort.
    raise Exception()
notify_about_snapshot_usage.side_effect = _notify_about_snapshot_usage
volume_usage_audit.main()
get_admin_context.assert_called_once_with()
self.assertEqual(CONF.project, 'cinder')
self.assertEqual(CONF.version, version.version_string())
log_setup.assert_called_once_with(CONF, "cinder")
get_logger.assert_called_once_with('cinder')
rpc_init.assert_called_once_with(CONF)
last_completed_audit_period.assert_called_once_with()
volume_get_active_by_window.assert_called_once_with(ctxt, begin, end)
self.assertFalse(notify_about_volume_usage.called)
notify_about_snapshot_usage.assert_any_call(ctxt, snapshot1, 'exists',
extra_info)
notify_about_snapshot_usage.assert_any_call(
ctxt, snapshot1, 'create.start',
extra_usage_info=local_extra_info_create)
notify_about_snapshot_usage.assert_any_call(
ctxt, snapshot1, 'delete.start',
extra_usage_info=local_extra_info_delete)
@mock.patch('cinder.volume.utils.notify_about_snapshot_usage')
@mock.patch('cinder.db.snapshot_get_active_by_window')
@mock.patch('cinder.volume.utils.notify_about_volume_usage')
@mock.patch('cinder.db.volume_get_active_by_window')
@mock.patch('cinder.utils.last_completed_audit_period')
@mock.patch('cinder.rpc.init')
@mock.patch('cinder.version.version_string')
@mock.patch('oslo_log.log.getLogger')
@mock.patch('oslo_log.log.setup')
@mock.patch('cinder.context.get_admin_context')
def test_main(self, get_admin_context, log_setup, get_logger,
version_string, rpc_init, last_completed_audit_period,
volume_get_active_by_window, notify_about_volume_usage,
snapshot_get_active_by_window, notify_about_snapshot_usage):
CONF.set_override('send_actions', True)
CONF.set_override('start_time', '2014-01-01 01:00:00')
CONF.set_override('end_time', '2014-02-02 02:00:00')
begin = datetime.datetime(2014, 1, 1, 1, 0)
end = datetime.datetime(2014, 2, 2, 2, 0)
ctxt = context.RequestContext('fake-user', 'fake-project')
get_admin_context.return_value = ctxt
last_completed_audit_period.return_value = (begin, end)
volume1_created = datetime.datetime(2014, 1, 1, 2, 0)
volume1_deleted = datetime.datetime(2014, 1, 1, 3, 0)
volume1 = mock.MagicMock(id='1', project_id='fake-project',
created_at=volume1_created,
deleted_at=volume1_deleted)
volume_get_active_by_window.return_value = [volume1]
extra_info = {
'audit_period_beginning': str(begin),
'audit_period_ending': str(end),
}
extra_info_volume_create = {
'audit_period_beginning': str(volume1.created_at),
'audit_period_ending': str(volume1.created_at),
}
extra_info_volume_delete = {
'audit_period_beginning': str(volume1.deleted_at),
'audit_period_ending': str(volume1.deleted_at),
}
snapshot1_created = datetime.datetime(2014, 1, 1, 2, 0)
snapshot1_deleted = datetime.datetime(2014, 1, 1, 3, 0)
snapshot1 = mock.MagicMock(id='1', project_id='fake-project',
created_at=snapshot1_created,
deleted_at=snapshot1_deleted)
snapshot_get_active_by_window.return_value = [snapshot1]
extra_info_snapshot_create = {
'audit_period_beginning': str(snapshot1.created_at),
'audit_period_ending': str(snapshot1.created_at),
}
extra_info_snapshot_delete = {
'audit_period_beginning': str(snapshot1.deleted_at),
'audit_period_ending': str(snapshot1.deleted_at),
}
volume_usage_audit.main()
get_admin_context.assert_called_once_with()
self.assertEqual(CONF.project, 'cinder')
self.assertEqual(CONF.version, version.version_string())
log_setup.assert_called_once_with(CONF, "cinder")
get_logger.assert_called_once_with('cinder')
rpc_init.assert_called_once_with(CONF)
last_completed_audit_period.assert_called_once_with()
volume_get_active_by_window.assert_called_once_with(ctxt, begin, end)
notify_about_volume_usage.assert_any_call(
ctxt, volume1, 'exists', extra_usage_info=extra_info)
notify_about_volume_usage.assert_any_call(
ctxt, volume1, 'create.start',
extra_usage_info=extra_info_volume_create)
notify_about_volume_usage.assert_any_call(
ctxt, volume1, 'create.end',
extra_usage_info=extra_info_volume_create)
notify_about_volume_usage.assert_any_call(
ctxt, volume1, 'delete.start',
extra_usage_info=extra_info_volume_delete)
notify_about_volume_usage.assert_any_call(
ctxt, volume1, 'delete.end',
extra_usage_info=extra_info_volume_delete)
notify_about_snapshot_usage.assert_any_call(ctxt, snapshot1,
'exists', extra_info)
notify_about_snapshot_usage.assert_any_call(
ctxt, snapshot1, 'create.start',
extra_usage_info=extra_info_snapshot_create)
notify_about_snapshot_usage.assert_any_call(
ctxt, snapshot1, 'create.end',
extra_usage_info=extra_info_snapshot_create)
notify_about_snapshot_usage.assert_any_call(
ctxt, snapshot1, 'delete.start',
extra_usage_info=extra_info_snapshot_delete)
notify_about_snapshot_usage.assert_any_call(
ctxt, snapshot1, 'delete.end',
extra_usage_info=extra_info_snapshot_delete)
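# The error-path tests above all hinge on one pattern: a mock side_effect
# that fails only for a chosen event name. A minimal standalone sketch of
# that pattern (the event name, message, and underscore-prefixed names here
# are illustrative, not part of the cinder suite):
import mock  # already imported at the top of this file; repeated so the sketch stands alone

_notify = mock.Mock()

def _fail_on_event(*args, **kwargs):
    # Raise only when the targeted event name appears in the call args.
    if 'create.end' in args:
        raise Exception('induced failure for create.end')

_notify.side_effect = _fail_on_event
_notify('ctxt', 'volume1', 'create.start')  # passes through
# _notify('ctxt', 'volume1', 'create.end') would raise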
|
tmenjo/cinder-2015.1.1
|
cinder/tests/test_cmd.py
|
Python
|
apache-2.0
| 63,587
|
from codecs import open
from os import path
from setuptools import setup, Extension
from Cython.Distutils import build_ext
import numpy
here = path.abspath(path.dirname(__file__))
# Get the long description from the README file
with open(path.join(here, 'README.rst'), encoding='utf-8') as f:
long_description = f.read()
# Requirements
install_requires = [
    'cython>=0.24.1',
    'numpy>=1.6.1',
    'scipy>=0.16',
    'matplotlib>=1.5.1',
    'scikit-learn>=0.17.1',
    'nibabel>=2.0.2',
    'nilearn>=0.2.4',
    'GPy>=1.0.7',
]
setup(
name='connectopic_mapping',
version='0.3.0',
description='Connectopic mapping',
long_description=long_description,
author='Michele Damian',
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Science/Research',
'Topic :: Scientific/Engineering :: Medical Science Apps.',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
],
keywords='neuroscience connectopic mapping research',
packages=['connectopic_mapping'],
install_requires=install_requires,
cmdclass={'build_ext': build_ext},
ext_modules=[Extension("connectopic_mapping.haak", ["connectopic_mapping/haak.pyx"], include_dirs=[numpy.get_include()])],
)
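# A setup script like this is usually built in place before first use; a
# typical invocation (assuming Cython and numpy are already installed,
# since both are imported at setup time) would be:
#
#     python setup.py build_ext --inplace
#
# after which connectopic_mapping.haak imports as a compiled extension.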
|
MicheleDamian/ConnectopicMapping
|
setup.py
|
Python
|
apache-2.0
| 1,540
|
from django.conf.urls import patterns, url
urlpatterns = patterns(
'',
url(r'^$',
'flooding_lib.tools.gdmapstool.views.index',
name='flooding_gdmapstool_index'),
url(r'^gdmapdetail/(?P<gdmap_id>\d+)$',
'flooding_lib.tools.gdmapstool.views.gdmap_details',
name='flooding_gdmapstool_mapdetails'),
url(r'^reusegdmap/(?P<gdmap_id>\d+)$',
'flooding_lib.tools.gdmapstool.views.reuse_gdmap',
name='flooding_gdmapstool_reuse_gdmap'),
url(r'^loadgdmapform/(?P<gdmap_id>\d+)/$',
'flooding_lib.tools.gdmapstool.views.load_gdmap_form',
name='flooding_tools_gdmap_load_form'),
url(r'^savegdmapform/$',
'flooding_lib.tools.gdmapstool.views.save_gdmap_form',
name='flooding_tools_gdmap_save_form'),
)
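# Note: patterns() with dotted-string views was removed in Django 1.10.
# An equivalent modern form is a plain list (sketch only, assuming the
# same view functions are importable from the views module):
#
# from django.conf.urls import url
# from flooding_lib.tools.gdmapstool import views
#
# urlpatterns = [
#     url(r'^$', views.index, name='flooding_gdmapstool_index'),
#     url(r'^gdmapdetail/(?P<gdmap_id>\d+)$', views.gdmap_details,
#         name='flooding_gdmapstool_mapdetails'),
#     # ... remaining patterns follow the same shape
# ]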
|
lizardsystem/flooding-lib
|
flooding_lib/tools/gdmapstool/urls.py
|
Python
|
gpl-3.0
| 797
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import sys
from nova import flags
import sqlalchemy
from migrate.versioning import api as versioning_api
try:
from migrate.versioning import exceptions as versioning_exceptions
except ImportError:
try:
# python-migration changed location of exceptions after 1.6.3
# See LP Bug #717467
from migrate import exceptions as versioning_exceptions
except ImportError:
sys.exit(_("python-migrate is not installed. Exiting."))
FLAGS = flags.FLAGS
def db_sync(version=None):
# Make sure the database is under version control before upgrading.
db_version()
repo_path = _find_migrate_repo()
return versioning_api.upgrade(FLAGS.sql_connection, repo_path, version)
def db_version():
repo_path = _find_migrate_repo()
try:
return versioning_api.db_version(FLAGS.sql_connection, repo_path)
except versioning_exceptions.DatabaseNotControlledError:
# If we aren't version controlled we may already have the database
# in the state from before we started version control, check for that
# and set up version_control appropriately
meta = sqlalchemy.MetaData()
engine = sqlalchemy.create_engine(FLAGS.sql_connection, echo=False)
meta.reflect(bind=engine)
try:
for table in ('auth_tokens', 'zones', 'export_devices',
'fixed_ips', 'floating_ips', 'instances',
'key_pairs', 'networks', 'projects', 'quotas',
'security_group_instance_association',
'security_group_rules', 'security_groups',
'services', 'migrations',
'users', 'user_project_association',
'user_project_role_association',
'user_role_association',
'volumes'):
assert table in meta.tables
return db_version_control(1)
except AssertionError:
return db_version_control(0)
def db_version_control(version=None):
repo_path = _find_migrate_repo()
versioning_api.version_control(FLAGS.sql_connection, repo_path, version)
return version
def _find_migrate_repo():
"""Get the path for the migrate repository."""
path = os.path.join(os.path.abspath(os.path.dirname(__file__)),
'migrate_repo')
assert os.path.exists(path)
return path
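# Typical use of this module, as a sketch only (assumes FLAGS.sql_connection
# points at a reachable database; the module path is taken from this file's
# location in the tree):
#
#     from nova.db.sqlalchemy import migration
#     migration.db_sync()            # upgrade schema to the latest revision
#     print migration.db_version()   # current revision number (Python 2)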
|
superstack/nova
|
nova/db/sqlalchemy/migration.py
|
Python
|
apache-2.0
| 3,168
|
# Turtle Loop - Python Code - Elizabeth Tweedale
import turtle # imports the turtle module
loopy = turtle.Turtle() # name your turtle 'loopy'
# use a for loop to repeat the forward/right code 4 times
for i in range(4):
loopy.forward(50)
loopy.right(90)
turtle.done() # turtle is done
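# The same loop generalizes to any regular polygon: with n sides, turn
# 360/n degrees after each edge (sketch; this would go before turtle.done()):
#
# n = 6                        # e.g. a hexagon
# for i in range(n):
#     loopy.forward(50)
#     loopy.right(360.0 / n)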
|
elizabethtweedale/HowToCode2
|
SuperSkill-03-Artist/turtle-square-loopy.py
|
Python
|
gpl-3.0
| 332
|
# -*- coding: utf-8 -*-
import re
import time
from ..internal.Account import Account
from ..internal.misc import json
class EuroshareEu(Account):
__name__ = "EuroshareEu"
__type__ = "account"
__version__ = "0.12"
__status__ = "testing"
__description__ = """Euroshare.eu account plugin"""
__license__ = "GPLv3"
__authors__ = [("zoidberg", "zoidberg@mujmail.cz"),
("GammaC0de", "nitzo2001[AT]yahoo[DOT]com")]
def grab_info(self, user, password, data):
html = self.load("http://euroshare.eu/",
get={'lang': "en"})
m = re.search(
r'<span class="btn btn--nav green darken-3">Premium account until: (\d+/\d+/\d+ \d+:\d+:\d+)<',
html)
if m is None:
premium = False
validuntil = -1
else:
premium = True
validuntil = time.mktime(
time.strptime(
m.group(1),
"%d/%m/%Y %H:%M:%S"))
return {'validuntil': validuntil,
'trafficleft': -1, 'premium': premium}
def signin(self, user, password, data):
html = self.load("http://euroshare.eu/login.html")
if r'href="http://euroshare.eu/logout.html"' in html:
self.skip_login()
json_data = json.loads(self.load("http://euroshare.eu/ajax/_account_login.ajax.php",
post={'username': user,
'password': password,
'remember': "false",
'backlink': ""}))
if json_data.get("login_status") != "success":
self.fail_login()
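# The expiry parsing in grab_info() reduces to one conversion: a local-time
# string to a Unix timestamp. Standalone check (the date value is
# illustrative; the format string is the one used above):
_example_validuntil = time.mktime(
    time.strptime("31/12/2017 23:59:59", "%d/%m/%Y %H:%M:%S"))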
|
Arno-Nymous/pyload
|
module/plugins/accounts/EuroshareEu.py
|
Python
|
gpl-3.0
| 1,755
|
import modules.pumpingsystem as ps
import pandas as pd
import numpy as np
# Pump schedules as per SCADA. Rows = pumps; columns = peak, standard and off-peak periods.
pump_schedule_41 = np.array([[72, 42, 50],
[95, 78, 86],
[110, 110, 110],
[120, 120, 120],
[150, 150, 150]])
pump_schedule_31 = np.array([[77, 45, 45],
[92, 70, 60],
[110, 110, 110],
[120, 120, 120]])
pump_schedule_20 = np.array([[72, 47, 55],
[82, 70, 70],
[91, 87, 92],
[110, 110, 110]])
pump_schedule_IPC = np.array([[80, 45, 45],
[85, 70, 60],
[90, 82, 82],
[110, 110, 110],
[150, 150, 150]])
dummy_pump_schedule_surface = np.array([[150, 150, 150]])
# Inflows into dams
dam_inflow_profiles = pd.read_csv('input/CS3_dam_inflow_profiles.csv.gz')
inflow_41 = np.reshape(dam_inflow_profiles['41L Inflow'].values, (24, 2))
inflow_31 = np.reshape(dam_inflow_profiles['31L Inflow'].values, (24, 2))
inflow_20 = np.reshape(dam_inflow_profiles['20L Inflow'].values, (24, 2))
inflow_IPC = np.reshape(dam_inflow_profiles['IPC Inflow'].values, (24, 2))
inflow_surface = np.reshape(dam_inflow_profiles['Surface Inflow'].values, (24, 2))
# Read actual data for initial conditions and validation
actual_values = pd.read_csv('input/CS3_data_for_validation.csv.gz')
actual_status_41 = actual_values['41L Status'].values
actual_status_31 = actual_values['31L Status'].values
actual_status_20 = actual_values['20L Status'].values
actual_status_IPC = actual_values['IPC Status'].values
initial_level_41 = actual_values['41L Level'][0]
initial_level_31 = actual_values['31L Level'][0]
initial_level_20 = actual_values['20L Level'][0]
initial_level_IPC = actual_values['IPC Level'][0]
initial_level_surface = actual_values['Surface Level'][0]
# Create pump system
pump_system = ps.PumpSystem('CS3')
pump_system.add_level(ps.PumpingLevel("41L", 3000000, initial_level_41,
216.8, 3508.4, pump_schedule_41, actual_status_41[0],
inflow_41, fed_to_level="31L", pump_statuses_for_validation=actual_status_41,
n_mode_max_pumps=2, n_mode_max_level=80, n_mode_control_range=30,
n_mode_top_offset=5))
pump_system.add_level(ps.PumpingLevel("31L", 3000000, initial_level_31,
146.8, 3283.6, pump_schedule_31, actual_status_31[0],
inflow_31, fed_to_level="20L", pump_statuses_for_validation=actual_status_31,
n_mode_max_pumps=2, n_mode_max_level=80, n_mode_control_range=20,
n_mode_top_offset=5, n_mode_bottom_offset=5))
pump_system.add_level(ps.PumpingLevel("20L", 3000000, initial_level_20,
171.8, 3821.0, pump_schedule_20, actual_status_20[0],
inflow_20, fed_to_level="IPC", pump_statuses_for_validation=actual_status_20,
n_mode_max_pumps=2, n_mode_control_range=20, n_mode_top_offset=7,
n_mode_bottom_offset=5))
pump_system.add_level(ps.PumpingLevel("IPC", 3000000, initial_level_IPC,
147.4, 3572.8, pump_schedule_IPC, actual_status_IPC[0],
inflow_IPC, fed_to_level="Surface",
pump_statuses_for_validation=actual_status_IPC,
n_mode_max_pumps=2, n_mode_max_level=80, n_mode_control_range=10,
n_mode_top_offset=5, n_mode_bottom_offset=3))
pump_system.add_level(ps.PumpingLevel("Surface", 5000000, initial_level_surface,
0, 0, dummy_pump_schedule_surface, 0, inflow_surface,
pump_statuses_for_validation=actual_status_IPC,
n_mode_max_pumps=0)) # the status data doesn't matter
# Perform simulations
pump_system.perform_simulation(mode='validation', save=True)
pump_system.perform_simulation(mode='1-factor', save=True)
pump_system.perform_simulation(mode='2-factor', save=True)
pump_system.perform_simulation(mode='n-factor', save=True)
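# Each inflow profile above is reshaped to (24, 2): presumably 24 hours of
# two half-hour samples each. A quick standalone check of that layout
# (values illustrative; underscore-prefixed names are not part of the model):
_flat = np.arange(48)                  # 48 half-hourly samples
_hourly = np.reshape(_flat, (24, 2))
assert _hourly[3].tolist() == [6, 7]   # hour 3 holds samples 6 and 7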
|
Mierzen/Dam-Simulation
|
simulations/Case_study_3/simulation_CS3.py
|
Python
|
gpl-3.0
| 4,643
|
#!/usr/bin/env python
# coding=utf-8
import sys
import unittest
import run_test
reload(sys)
sys.setdefaultencoding("utf-8")  # Python 2 only: required for the non-ASCII app names below
class TestCaseUnit(unittest.TestCase):
def test_case_1(self):
self.assertEqual(
"Pass",
run_test.run_test_result(
"Crosswalk-Manifest-Check1",
" birds"))
def test_case_2(self):
self.assertEqual(
"Pass",
run_test.run_test_result(
"Crosswalk-Manifest-Check2",
"Angrybirds"))
def test_case_3(self):
self.assertEqual(
"Pass",
run_test.run_test_result(
"Crosswalk-Manifest-Check3",
"123456birds"))
def test_case_4(self):
self.assertEqual(
"Pass",
run_test.run_test_result(
"Crosswalk-Manifest-Check4",
"_a-bbirds"))
def test_case_5(self):
self.assertEqual(
"Pass",
run_test.run_test_result(
"Crosswalk-Manifest-Check5",
"<>birds"))
def test_case_6(self):
self.assertEqual(
"Pass",
run_test.run_test_result(
"Crosswalk-Manifest-Check6",
".CAPITALbirds"))
def test_case_7(self):
self.assertEqual(
"Pass",
run_test.run_test_result(
"Crosswalk-Manifest-Check7",
"\nbirds"))
def test_case_8(self):
self.assertEqual(
"Pass",
run_test.run_test_result(
"Crosswalk-Manifest-Check8",
"*&^%!@#$%^&*()birds"))
def test_case_9(self):
self.assertEqual(
"Pass",
run_test.run_test_result(
"Crosswalk-Manifest-Check9",
"+-birds"))
def test_case_10(self):
self.assertEqual(
"Pass",
run_test.run_test_result(
"Crosswalk-Manifest-Check10",
"'birds"))
def test_case_11(self):
self.assertEqual(
"Pass",
run_test.run_test_result(
"Crosswalk-Manifest-Check11",
"中文birds"))
def test_case_12(self):
self.assertEqual(
"Pass",
run_test.run_test_result(
"Crosswalk-Manifest-Check12",
"中文 "))
def test_case_13(self):
self.assertEqual(
"Pass",
run_test.run_test_result(
"Crosswalk-Manifest-Check13",
"' "))
def test_case_14(self):
self.assertEqual(
"Pass",
run_test.run_test_result(
"Crosswalk-Manifest-Check14",
"+- "))
def test_case_15(self):
self.assertEqual(
"Pass",
run_test.run_test_result(
"Crosswalk-Manifest-Check15",
"*&^%!@#$%^&*() "))
def test_case_16(self):
self.assertEqual(
"Pass",
run_test.run_test_result(
"Crosswalk-Manifest-Check16",
"\n "))
def test_case_17(self):
self.assertEqual(
"Pass",
run_test.run_test_result(
"Crosswalk-Manifest-Check17",
".CAPITAL "))
def test_case_18(self):
self.assertEqual(
"Pass",
run_test.run_test_result(
"Crosswalk-Manifest-Check18",
"<> "))
def test_case_19(self):
self.assertEqual(
"Pass",
run_test.run_test_result(
"Crosswalk-Manifest-Check19",
"_a-b "))
def test_case_20(self):
self.assertEqual(
"Pass",
run_test.run_test_result(
"Crosswalk-Manifest-Check20",
"123456 "))
def test_case_21(self):
self.assertEqual(
"Pass",
run_test.run_test_result(
"Crosswalk-Manifest-Check21",
"Angry "))
def test_case_22(self):
self.assertEqual(
"Pass",
run_test.run_test_result(
"Crosswalk-Manifest-Check22",
" "))
def test_case_23(self):
self.assertEqual(
"Pass",
run_test.run_test_result(
"Crosswalk-Manifest-Check23",
" a b"))
def test_case_24(self):
self.assertEqual(
"Pass",
run_test.run_test_result(
"Crosswalk-Manifest-Check24",
"Angrya b"))
def test_case_25(self):
self.assertEqual(
"Pass",
run_test.run_test_result(
"Crosswalk-Manifest-Check25",
"123456a b"))
def test_case_26(self):
self.assertEqual(
"Pass",
run_test.run_test_result(
"Crosswalk-Manifest-Check26",
"_a-ba b"))
def test_case_27(self):
self.assertEqual(
"Pass",
run_test.run_test_result(
"Crosswalk-Manifest-Check27",
"<>a b"))
def test_case_28(self):
self.assertEqual(
"Pass",
run_test.run_test_result(
"Crosswalk-Manifest-Check28",
".CAPITALa b"))
def test_case_29(self):
self.assertEqual(
"Pass",
run_test.run_test_result(
"Crosswalk-Manifest-Check29",
"\na b"))
def test_case_30(self):
self.assertEqual(
"Pass",
run_test.run_test_result(
"Crosswalk-Manifest-Check30",
"*&^%!@#$%^&*()a b"))
def test_case_31(self):
self.assertEqual(
"Pass",
run_test.run_test_result(
"Crosswalk-Manifest-Check31",
"+-a b"))
def test_case_32(self):
self.assertEqual(
"Pass",
run_test.run_test_result(
"Crosswalk-Manifest-Check32",
"'a b"))
def test_case_33(self):
self.assertEqual(
"Pass",
run_test.run_test_result(
"Crosswalk-Manifest-Check33",
"中文a b"))
def test_case_34(self):
self.assertEqual(
"Pass",
run_test.run_test_result(
"Crosswalk-Manifest-Check34",
"中文b "))
def test_case_35(self):
self.assertEqual(
"Pass",
run_test.run_test_result(
"Crosswalk-Manifest-Check35",
"'b "))
def test_case_36(self):
self.assertEqual(
"Pass",
run_test.run_test_result(
"Crosswalk-Manifest-Check36",
"+-b "))
def test_case_37(self):
self.assertEqual(
"Pass",
run_test.run_test_result(
"Crosswalk-Manifest-Check37",
"*&^%!@#$%^&*()b "))
def test_case_38(self):
self.assertEqual(
"Pass",
run_test.run_test_result(
"Crosswalk-Manifest-Check38",
"\nb "))
def test_case_39(self):
self.assertEqual(
"Pass",
run_test.run_test_result(
"Crosswalk-Manifest-Check39",
".CAPITALb "))
def test_case_40(self):
self.assertEqual(
"Pass",
run_test.run_test_result(
"Crosswalk-Manifest-Check40",
"<>b "))
def test_case_41(self):
self.assertEqual(
"Pass",
run_test.run_test_result(
"Crosswalk-Manifest-Check41",
"_a-bb "))
def test_case_42(self):
self.assertEqual(
"Pass",
run_test.run_test_result(
"Crosswalk-Manifest-Check42",
"123456b "))
def test_case_43(self):
self.assertEqual(
"Pass",
run_test.run_test_result(
"Crosswalk-Manifest-Check43",
"Angryb "))
def test_case_44(self):
self.assertEqual(
"Pass",
run_test.run_test_result(
"Crosswalk-Manifest-Check44",
" b "))
def test_case_45(self):
self.assertEqual(
"Pass",
run_test.run_test_result(
"Crosswalk-Manifest-Check45",
" BIRDS."))
def test_case_46(self):
self.assertEqual(
"Pass",
run_test.run_test_result(
"Crosswalk-Manifest-Check46",
"AngryBIRDS."))
def test_case_47(self):
self.assertEqual(
"Pass",
run_test.run_test_result(
"Crosswalk-Manifest-Check47",
"123456BIRDS."))
def test_case_48(self):
self.assertEqual(
"Pass",
run_test.run_test_result(
"Crosswalk-Manifest-Check48",
"_a-bBIRDS."))
def test_case_49(self):
self.assertEqual(
"Pass",
run_test.run_test_result(
"Crosswalk-Manifest-Check49",
"<>BIRDS."))
def test_case_50(self):
self.assertEqual(
"Pass",
run_test.run_test_result(
"Crosswalk-Manifest-Check50",
".CAPITALBIRDS."))
def test_case_51(self):
self.assertEqual(
"Pass",
run_test.run_test_result(
"Crosswalk-Manifest-Check51",
"\nBIRDS."))
def test_case_52(self):
self.assertEqual(
"Pass",
run_test.run_test_result(
"Crosswalk-Manifest-Check52",
"*&^%!@#$%^&*()BIRDS."))
def test_case_53(self):
self.assertEqual(
"Pass",
run_test.run_test_result(
"Crosswalk-Manifest-Check53",
"+-BIRDS."))
def test_case_54(self):
self.assertEqual(
"Pass",
run_test.run_test_result(
"Crosswalk-Manifest-Check54",
"'BIRDS."))
def test_case_55(self):
self.assertEqual(
"Pass",
run_test.run_test_result(
"Crosswalk-Manifest-Check55",
"中文BIRDS."))
def test_case_56(self):
self.assertEqual(
"Pass",
run_test.run_test_result(
"Crosswalk-Manifest-Check56",
"中文asdfasdfasdfasdfdfghfggfhjhjerewrtrtyyuivghxvasdaetsdfgxcvbrtysadawfasdfasdewrtwer"))
def test_case_57(self):
self.assertEqual(
"Pass",
run_test.run_test_result(
"Crosswalk-Manifest-Check57",
"'asdfasdfasdfasdfdfghfggfhjhjerewrtrtyyuivghxvasdaetsdfgxcvbrtysadawfasdfasdewrtwer"))
def test_case_58(self):
self.assertEqual(
"Pass",
run_test.run_test_result(
"Crosswalk-Manifest-Check58",
"+-asdfasdfasdfasdfdfghfggfhjhjerewrtrtyyuivghxvasdaetsdfgxcvbrtysadawfasdfasdewrtwer"))
def test_case_59(self):
self.assertEqual(
"Pass",
run_test.run_test_result(
"Crosswalk-Manifest-Check59",
"*&^%!@#$%^&*()asdfasdfasdfasdfdfghfggfhjhjerewrtrtyyuivghxvasdaetsdfgxcvbrtysadawfasdfasdewrtwer"))
def test_case_60(self):
self.assertEqual(
"Pass",
run_test.run_test_result(
"Crosswalk-Manifest-Check60",
"\nasdfasdfasdfasdfdfghfggfhjhjerewrtrtyyuivghxvasdaetsdfgxcvbrtysadawfasdfasdewrtwer"))
def test_case_61(self):
self.assertEqual(
"Pass",
run_test.run_test_result(
"Crosswalk-Manifest-Check61",
".CAPITALasdfasdfasdfasdfdfghfggfhjhjerewrtrtyyuivghxvasdaetsdfgxcvbrtysadawfasdfasdewrtwer"))
def test_case_62(self):
self.assertEqual(
"Pass",
run_test.run_test_result(
"Crosswalk-Manifest-Check62",
"<>asdfasdfasdfasdfdfghfggfhjhjerewrtrtyyuivghxvasdaetsdfgxcvbrtysadawfasdfasdewrtwer"))
def test_case_63(self):
self.assertEqual(
"Pass",
run_test.run_test_result(
"Crosswalk-Manifest-Check63",
"_a-basdfasdfasdfasdfdfghfggfhjhjerewrtrtyyuivghxvasdaetsdfgxcvbrtysadawfasdfasdewrtwer"))
def test_case_64(self):
self.assertEqual(
"Pass",
run_test.run_test_result(
"Crosswalk-Manifest-Check64",
"123456asdfasdfasdfasdfdfghfggfhjhjerewrtrtyyuivghxvasdaetsdfgxcvbrtysadawfasdfasdewrtwer"))
def test_case_65(self):
self.assertEqual(
"Pass",
run_test.run_test_result(
"Crosswalk-Manifest-Check65",
"Angryasdfasdfasdfasdfdfghfggfhjhjerewrtrtyyuivghxvasdaetsdfgxcvbrtysadawfasdfasdewrtwer"))
def test_case_66(self):
self.assertEqual(
"Pass",
run_test.run_test_result(
"Crosswalk-Manifest-Check66",
" asdfasdfasdfasdfdfghfggfhjhjerewrtrtyyuivghxvasdaetsdfgxcvbrtysadawfasdfasdewrtwer"))
def test_case_67(self):
self.assertEqual(
"Pass",
run_test.run_test_result(
"Crosswalk-Manifest-Check67",
" asdfasdfasdfasdfdfghfggfhjhjerewrtrtyyuivghxvasdaetsdfgxcvbrtysadawfasdfasdewrtwer"))
def test_case_68(self):
self.assertEqual(
"Pass",
run_test.run_test_result(
"Crosswalk-Manifest-Check68",
"Angryasdfasdfasdfasdfdfghfggfhjhjerewrtrtyyuivghxvasdaetsdfgxcvbrtysadawfasdfasdewrtwer"))
def test_case_69(self):
self.assertEqual(
"Pass",
run_test.run_test_result(
"Crosswalk-Manifest-Check69",
"123456asdfasdfasdfasdfdfghfggfhjhjerewrtrtyyuivghxvasdaetsdfgxcvbrtysadawfasdfasdewrtwer"))
def test_case_70(self):
self.assertEqual(
"Pass",
run_test.run_test_result(
"Crosswalk-Manifest-Check70",
"_a-basdfasdfasdfasdfdfghfggfhjhjerewrtrtyyuivghxvasdaetsdfgxcvbrtysadawfasdfasdewrtwer"))
def test_case_71(self):
self.assertEqual(
"Pass",
run_test.run_test_result(
"Crosswalk-Manifest-Check71",
"<>asdfasdfasdfasdfdfghfggfhjhjerewrtrtyyuivghxvasdaetsdfgxcvbrtysadawfasdfasdewrtwer"))
def test_case_72(self):
self.assertEqual(
"Pass",
run_test.run_test_result(
"Crosswalk-Manifest-Check72",
".CAPITALasdfasdfasdfasdfdfghfggfhjhjerewrtrtyyuivghxvasdaetsdfgxcvbrtysadawfasdfasdewrtwer"))
def test_case_73(self):
self.assertEqual(
"Pass",
run_test.run_test_result(
"Crosswalk-Manifest-Check73",
"\nasdfasdfasdfasdfdfghfggfhjhjerewrtrtyyuivghxvasdaetsdfgxcvbrtysadawfasdfasdewrtwer"))
def test_case_74(self):
self.assertEqual(
"Pass",
run_test.run_test_result(
"Crosswalk-Manifest-Check74",
"*&^%!@#$%^&*()asdfasdfasdfasdfdfghfggfhjhjerewrtrtyyuivghxvasdaetsdfgxcvbrtysadawfasdfasdewrtwer"))
def test_case_75(self):
self.assertEqual(
"Pass",
run_test.run_test_result(
"Crosswalk-Manifest-Check75",
"+-asdfasdfasdfasdfdfghfggfhjhjerewrtrtyyuivghxvasdaetsdfgxcvbrtysadawfasdfasdewrtwer"))
def test_case_76(self):
self.assertEqual(
"Pass",
run_test.run_test_result(
"Crosswalk-Manifest-Check76",
"'asdfasdfasdfasdfdfghfggfhjhjerewrtrtyyuivghxvasdaetsdfgxcvbrtysadawfasdfasdewrtwer"))
def test_case_77(self):
self.assertEqual(
"Pass",
run_test.run_test_result(
"Crosswalk-Manifest-Check77",
"中文asdfasdfasdfasdfdfghfggfhjhjerewrtrtyyuivghxvasdaetsdfgxcvbrtysadawfasdfasdewrtwer"))
def test_case_78(self):
self.assertEqual(
"Pass",
run_test.run_test_result(
"Crosswalk-Manifest-Check78",
"中文BIRDS."))
def test_case_79(self):
self.assertEqual(
"Pass",
run_test.run_test_result(
"Crosswalk-Manifest-Check79",
"'BIRDS."))
def test_case_80(self):
self.assertEqual(
"Pass",
run_test.run_test_result(
"Crosswalk-Manifest-Check80",
"+-BIRDS."))
def test_case_81(self):
self.assertEqual(
"Pass",
run_test.run_test_result(
"Crosswalk-Manifest-Check81",
"*&^%!@#$%^&*()BIRDS."))
def test_case_82(self):
self.assertEqual(
"Pass",
run_test.run_test_result(
"Crosswalk-Manifest-Check82",
"\nBIRDS."))
def test_case_83(self):
self.assertEqual(
"Pass",
run_test.run_test_result(
"Crosswalk-Manifest-Check83",
".CAPITALBIRDS."))
def test_case_84(self):
self.assertEqual(
"Pass",
run_test.run_test_result(
"Crosswalk-Manifest-Check84",
"<>BIRDS."))
def test_case_85(self):
self.assertEqual(
"Pass",
run_test.run_test_result(
"Crosswalk-Manifest-Check85",
"_a-bBIRDS."))
def test_case_86(self):
self.assertEqual(
"Pass",
run_test.run_test_result(
"Crosswalk-Manifest-Check86",
"123456BIRDS."))
def test_case_87(self):
self.assertEqual(
"Pass",
run_test.run_test_result(
"Crosswalk-Manifest-Check87",
"AngryBIRDS."))
def test_case_88(self):
self.assertEqual(
"Pass",
run_test.run_test_result(
"Crosswalk-Manifest-Check88",
" BIRDS."))
def test_case_89(self):
self.assertEqual(
"Pass",
run_test.run_test_result(
"Crosswalk-Manifest-Check89",
" b "))
def test_case_90(self):
self.assertEqual(
"Pass",
run_test.run_test_result(
"Crosswalk-Manifest-Check90",
"Angryb "))
def test_case_91(self):
self.assertEqual(
"Pass",
run_test.run_test_result(
"Crosswalk-Manifest-Check91",
"123456b "))
def test_case_92(self):
self.assertEqual(
"Pass",
run_test.run_test_result(
"Crosswalk-Manifest-Check92",
"_a-bb "))
def test_case_93(self):
self.assertEqual(
"Pass",
run_test.run_test_result(
"Crosswalk-Manifest-Check93",
"<>b "))
def test_case_94(self):
self.assertEqual(
"Pass",
run_test.run_test_result(
"Crosswalk-Manifest-Check94",
".CAPITALb "))
def test_case_95(self):
self.assertEqual(
"Pass",
run_test.run_test_result(
"Crosswalk-Manifest-Check95",
"\nb "))
def test_case_96(self):
self.assertEqual(
"Pass",
run_test.run_test_result(
"Crosswalk-Manifest-Check96",
"*&^%!@#$%^&*()b "))
def test_case_97(self):
self.assertEqual(
"Pass",
run_test.run_test_result(
"Crosswalk-Manifest-Check97",
"+-b "))
def test_case_98(self):
self.assertEqual(
"Pass",
run_test.run_test_result(
"Crosswalk-Manifest-Check98",
"'b "))
def test_case_99(self):
self.assertEqual(
"Pass",
run_test.run_test_result(
"Crosswalk-Manifest-Check99",
"中文b "))
def test_case_100(self):
self.assertEqual(
"Pass",
run_test.run_test_result(
"Crosswalk-Manifest-Check100",
"中文a b"))
def test_case_101(self):
self.assertEqual(
"Pass",
run_test.run_test_result(
"Crosswalk-Manifest-Check101",
"'a b"))
def test_case_102(self):
self.assertEqual(
"Pass",
run_test.run_test_result(
"Crosswalk-Manifest-Check102",
"+-a b"))
def test_case_103(self):
self.assertEqual(
"Pass",
run_test.run_test_result(
"Crosswalk-Manifest-Check103",
"*&^%!@#$%^&*()a b"))
def test_case_104(self):
self.assertEqual(
"Pass",
run_test.run_test_result(
"Crosswalk-Manifest-Check104",
"\na b"))
def test_case_105(self):
self.assertEqual(
"Pass",
run_test.run_test_result(
"Crosswalk-Manifest-Check105",
".CAPITALa b"))
def test_case_106(self):
self.assertEqual(
"Pass",
run_test.run_test_result(
"Crosswalk-Manifest-Check106",
"<>a b"))
def test_case_107(self):
self.assertEqual(
"Pass",
run_test.run_test_result(
"Crosswalk-Manifest-Check107",
"_a-ba b"))
def test_case_108(self):
self.assertEqual(
"Pass",
run_test.run_test_result(
"Crosswalk-Manifest-Check108",
"123456a b"))
def test_case_109(self):
self.assertEqual(
"Pass",
run_test.run_test_result(
"Crosswalk-Manifest-Check109",
"Angrya b"))
def test_case_110(self):
self.assertEqual(
"Pass",
run_test.run_test_result(
"Crosswalk-Manifest-Check110",
" a b"))
def test_case_111(self):
self.assertEqual(
"Pass",
run_test.run_test_result(
"Crosswalk-Manifest-Check111",
" "))
def test_case_112(self):
self.assertEqual(
"Pass",
run_test.run_test_result(
"Crosswalk-Manifest-Check112",
"Angry "))
def test_case_113(self):
self.assertEqual(
"Pass",
run_test.run_test_result(
"Crosswalk-Manifest-Check113",
"123456 "))
def test_case_114(self):
self.assertEqual(
"Pass",
run_test.run_test_result(
"Crosswalk-Manifest-Check114",
"_a-b "))
def test_case_115(self):
self.assertEqual(
"Pass",
run_test.run_test_result(
"Crosswalk-Manifest-Check115",
"<> "))
def test_case_116(self):
self.assertEqual(
"Pass",
run_test.run_test_result(
"Crosswalk-Manifest-Check116",
".CAPITAL "))
def test_case_117(self):
self.assertEqual(
"Pass",
run_test.run_test_result(
"Crosswalk-Manifest-Check117",
"\n "))
def test_case_118(self):
self.assertEqual(
"Pass",
run_test.run_test_result(
"Crosswalk-Manifest-Check118",
"*&^%!@#$%^&*() "))
def test_case_119(self):
self.assertEqual(
"Pass",
run_test.run_test_result(
"Crosswalk-Manifest-Check119",
"+- "))
def test_case_120(self):
self.assertEqual(
"Pass",
run_test.run_test_result(
"Crosswalk-Manifest-Check120",
"' "))
def test_case_121(self):
self.assertEqual(
"Pass",
run_test.run_test_result(
"Crosswalk-Manifest-Check121",
"中文 "))
def test_case_122(self):
self.assertEqual(
"Pass",
run_test.run_test_result(
"Crosswalk-Manifest-Check122",
"中文birds"))
def test_case_123(self):
self.assertEqual(
"Pass",
run_test.run_test_result(
"Crosswalk-Manifest-Check123",
"'birds"))
def test_case_124(self):
self.assertEqual(
"Pass",
run_test.run_test_result(
"Crosswalk-Manifest-Check124",
"+-birds"))
def test_case_125(self):
self.assertEqual(
"Pass",
run_test.run_test_result(
"Crosswalk-Manifest-Check125",
"*&^%!@#$%^&*()birds"))
def test_case_126(self):
self.assertEqual(
"Pass",
run_test.run_test_result(
"Crosswalk-Manifest-Check126",
"\nbirds"))
def test_case_127(self):
self.assertEqual(
"Pass",
run_test.run_test_result(
"Crosswalk-Manifest-Check127",
".CAPITALbirds"))
def test_case_128(self):
self.assertEqual(
"Pass",
run_test.run_test_result(
"Crosswalk-Manifest-Check128",
"<>birds"))
def test_case_129(self):
self.assertEqual(
"Pass",
run_test.run_test_result(
"Crosswalk-Manifest-Check129",
"_a-bbirds"))
def test_case_130(self):
self.assertEqual(
"Pass",
run_test.run_test_result(
"Crosswalk-Manifest-Check130",
"123456birds"))
def test_case_131(self):
self.assertEqual(
"Pass",
run_test.run_test_result(
"Crosswalk-Manifest-Check131",
"Angrybirds"))
def test_case_132(self):
self.assertEqual(
"Pass",
run_test.run_test_result(
"Crosswalk-Manifest-Check132",
" birds"))
def test_case_133(self):
self.assertEqual(
"Pass",
run_test.run_test_result(
"Crosswalk-Manifest-Check133",
" birds"))
def test_case_134(self):
self.assertEqual(
"Pass",
run_test.run_test_result(
"Crosswalk-Manifest-Check134",
"Angrybirds"))
def test_case_135(self):
self.assertEqual(
"Pass",
run_test.run_test_result(
"Crosswalk-Manifest-Check135",
"123456birds"))
def test_case_136(self):
self.assertEqual(
"Pass",
run_test.run_test_result(
"Crosswalk-Manifest-Check136",
"_a-bbirds"))
def test_case_137(self):
self.assertEqual(
"Pass",
run_test.run_test_result(
"Crosswalk-Manifest-Check137",
"<>birds"))
def test_case_138(self):
self.assertEqual(
"Pass",
run_test.run_test_result(
"Crosswalk-Manifest-Check138",
".CAPITALbirds"))
def test_case_139(self):
self.assertEqual(
"Pass",
run_test.run_test_result(
"Crosswalk-Manifest-Check139",
"\nbirds"))
def test_case_140(self):
self.assertEqual(
"Pass",
run_test.run_test_result(
"Crosswalk-Manifest-Check140",
"*&^%!@#$%^&*()birds"))
def test_case_141(self):
self.assertEqual(
"Pass",
run_test.run_test_result(
"Crosswalk-Manifest-Check141",
"+-birds"))
def test_case_142(self):
self.assertEqual(
"Pass",
run_test.run_test_result(
"Crosswalk-Manifest-Check142",
"'birds"))
def test_case_143(self):
self.assertEqual(
"Pass",
run_test.run_test_result(
"Crosswalk-Manifest-Check143",
"中文birds"))
def test_case_144(self):
self.assertEqual(
"Pass",
run_test.run_test_result(
"Crosswalk-Manifest-Check144",
"中文 "))
def test_case_145(self):
self.assertEqual(
"Pass",
run_test.run_test_result(
"Crosswalk-Manifest-Check145",
"' "))
def test_case_146(self):
self.assertEqual(
"Pass",
run_test.run_test_result(
"Crosswalk-Manifest-Check146",
"+- "))
def test_case_147(self):
self.assertEqual(
"Pass",
run_test.run_test_result(
"Crosswalk-Manifest-Check147",
"*&^%!@#$%^&*() "))
def test_case_148(self):
self.assertEqual(
"Pass",
run_test.run_test_result(
"Crosswalk-Manifest-Check148",
"\n "))
def test_case_149(self):
self.assertEqual(
"Pass",
run_test.run_test_result(
"Crosswalk-Manifest-Check149",
".CAPITAL "))
def test_case_150(self):
self.assertEqual(
"Pass",
run_test.run_test_result(
"Crosswalk-Manifest-Check150",
"<> "))
def test_case_151(self):
self.assertEqual(
"Pass",
run_test.run_test_result(
"Crosswalk-Manifest-Check151",
"_a-b "))
def test_case_152(self):
self.assertEqual(
"Pass",
run_test.run_test_result(
"Crosswalk-Manifest-Check152",
"123456 "))
def test_case_153(self):
self.assertEqual(
"Pass",
run_test.run_test_result(
"Crosswalk-Manifest-Check153",
"Angry "))
def test_case_154(self):
self.assertEqual(
"Pass",
run_test.run_test_result(
"Crosswalk-Manifest-Check154",
" "))
def test_case_155(self):
self.assertEqual(
"Pass",
run_test.run_test_result(
"Crosswalk-Manifest-Check155",
" a b"))
def test_case_156(self):
self.assertEqual(
"Pass",
run_test.run_test_result(
"Crosswalk-Manifest-Check156",
"Angrya b"))
def test_case_157(self):
self.assertEqual(
"Pass",
run_test.run_test_result(
"Crosswalk-Manifest-Check157",
"123456a b"))
def test_case_158(self):
self.assertEqual(
"Pass",
run_test.run_test_result(
"Crosswalk-Manifest-Check158",
"_a-ba b"))
def test_case_159(self):
self.assertEqual(
"Pass",
run_test.run_test_result(
"Crosswalk-Manifest-Check159",
"<>a b"))
def test_case_160(self):
self.assertEqual(
"Pass",
run_test.run_test_result(
"Crosswalk-Manifest-Check160",
".CAPITALa b"))
def test_case_161(self):
self.assertEqual(
"Pass",
run_test.run_test_result(
"Crosswalk-Manifest-Check161",
"\na b"))
def test_case_162(self):
self.assertEqual(
"Pass",
run_test.run_test_result(
"Crosswalk-Manifest-Check162",
"*&^%!@#$%^&*()a b"))
def test_case_163(self):
self.assertEqual(
"Pass",
run_test.run_test_result(
"Crosswalk-Manifest-Check163",
"+-a b"))
def test_case_164(self):
self.assertEqual(
"Pass",
run_test.run_test_result(
"Crosswalk-Manifest-Check164",
"'a b"))
def test_case_165(self):
self.assertEqual(
"Pass",
run_test.run_test_result(
"Crosswalk-Manifest-Check165",
"中文a b"))
def test_case_166(self):
self.assertEqual(
"Pass",
run_test.run_test_result(
"Crosswalk-Manifest-Check166",
"中文b "))
def test_case_167(self):
self.assertEqual(
"Pass",
run_test.run_test_result(
"Crosswalk-Manifest-Check167",
"'b "))
def test_case_168(self):
self.assertEqual(
"Pass",
run_test.run_test_result(
"Crosswalk-Manifest-Check168",
"+-b "))
def test_case_169(self):
self.assertEqual(
"Pass",
run_test.run_test_result(
"Crosswalk-Manifest-Check169",
"*&^%!@#$%^&*()b "))
def test_case_170(self):
self.assertEqual(
"Pass",
run_test.run_test_result(
"Crosswalk-Manifest-Check170",
"\nb "))
def test_case_171(self):
self.assertEqual(
"Pass",
run_test.run_test_result(
"Crosswalk-Manifest-Check171",
".CAPITALb "))
def test_case_172(self):
self.assertEqual(
"Pass",
run_test.run_test_result(
"Crosswalk-Manifest-Check172",
"<>b "))
def test_case_173(self):
self.assertEqual(
"Pass",
run_test.run_test_result(
"Crosswalk-Manifest-Check173",
"_a-bb "))
def test_case_174(self):
self.assertEqual(
"Pass",
run_test.run_test_result(
"Crosswalk-Manifest-Check174",
"123456b "))
def test_case_175(self):
self.assertEqual(
"Pass",
run_test.run_test_result(
"Crosswalk-Manifest-Check175",
"Angryb "))
def test_case_176(self):
self.assertEqual(
"Pass",
run_test.run_test_result(
"Crosswalk-Manifest-Check176",
" b "))
def test_case_177(self):
self.assertEqual(
"Pass",
run_test.run_test_result(
"Crosswalk-Manifest-Check177",
" BIRDS."))
def test_case_178(self):
self.assertEqual(
"Pass",
run_test.run_test_result(
"Crosswalk-Manifest-Check178",
"AngryBIRDS."))
def test_case_179(self):
self.assertEqual(
"Pass",
run_test.run_test_result(
"Crosswalk-Manifest-Check179",
"123456BIRDS."))
def test_case_180(self):
self.assertEqual(
"Pass",
run_test.run_test_result(
"Crosswalk-Manifest-Check180",
"_a-bBIRDS."))
def test_case_181(self):
self.assertEqual(
"Pass",
run_test.run_test_result(
"Crosswalk-Manifest-Check181",
"<>BIRDS."))
def test_case_182(self):
self.assertEqual(
"Pass",
run_test.run_test_result(
"Crosswalk-Manifest-Check182",
".CAPITALBIRDS."))
def test_case_183(self):
self.assertEqual(
"Pass",
run_test.run_test_result(
"Crosswalk-Manifest-Check183",
"\nBIRDS."))
def test_case_184(self):
self.assertEqual(
"Pass",
run_test.run_test_result(
"Crosswalk-Manifest-Check184",
"*&^%!@#$%^&*()BIRDS."))
def test_case_185(self):
self.assertEqual(
"Pass",
run_test.run_test_result(
"Crosswalk-Manifest-Check185",
"+-BIRDS."))
def test_case_186(self):
self.assertEqual(
"Pass",
run_test.run_test_result(
"Crosswalk-Manifest-Check186",
"'BIRDS."))
def test_case_187(self):
self.assertEqual(
"Pass",
run_test.run_test_result(
"Crosswalk-Manifest-Check187",
"中文BIRDS."))
def test_case_188(self):
self.assertEqual(
"Pass",
run_test.run_test_result(
"Crosswalk-Manifest-Check188",
"中文asdfasdfasdfasdfdfghfggfhjhjerewrtrtyyuivghxvasdaetsdfgxcvbrtysadawfasdfasdewrtwer"))
def test_case_189(self):
self.assertEqual(
"Pass",
run_test.run_test_result(
"Crosswalk-Manifest-Check189",
"'asdfasdfasdfasdfdfghfggfhjhjerewrtrtyyuivghxvasdaetsdfgxcvbrtysadawfasdfasdewrtwer"))
def test_case_190(self):
self.assertEqual(
"Pass",
run_test.run_test_result(
"Crosswalk-Manifest-Check190",
"+-asdfasdfasdfasdfdfghfggfhjhjerewrtrtyyuivghxvasdaetsdfgxcvbrtysadawfasdfasdewrtwer"))
def test_case_191(self):
self.assertEqual(
"Pass",
run_test.run_test_result(
"Crosswalk-Manifest-Check191",
"*&^%!@#$%^&*()asdfasdfasdfasdfdfghfggfhjhjerewrtrtyyuivghxvasdaetsdfgxcvbrtysadawfasdfasdewrtwer"))
def test_case_192(self):
self.assertEqual(
"Pass",
run_test.run_test_result(
"Crosswalk-Manifest-Check192",
"\nasdfasdfasdfasdfdfghfggfhjhjerewrtrtyyuivghxvasdaetsdfgxcvbrtysadawfasdfasdewrtwer"))
def test_case_193(self):
self.assertEqual(
"Pass",
run_test.run_test_result(
"Crosswalk-Manifest-Check193",
".CAPITALasdfasdfasdfasdfdfghfggfhjhjerewrtrtyyuivghxvasdaetsdfgxcvbrtysadawfasdfasdewrtwer"))
def test_case_194(self):
self.assertEqual(
"Pass",
run_test.run_test_result(
"Crosswalk-Manifest-Check194",
"<>asdfasdfasdfasdfdfghfggfhjhjerewrtrtyyuivghxvasdaetsdfgxcvbrtysadawfasdfasdewrtwer"))
def test_case_195(self):
self.assertEqual(
"Pass",
run_test.run_test_result(
"Crosswalk-Manifest-Check195",
"_a-basdfasdfasdfasdfdfghfggfhjhjerewrtrtyyuivghxvasdaetsdfgxcvbrtysadawfasdfasdewrtwer"))
def test_case_196(self):
self.assertEqual(
"Pass",
run_test.run_test_result(
"Crosswalk-Manifest-Check196",
"123456asdfasdfasdfasdfdfghfggfhjhjerewrtrtyyuivghxvasdaetsdfgxcvbrtysadawfasdfasdewrtwer"))
def test_case_197(self):
self.assertEqual(
"Pass",
run_test.run_test_result(
"Crosswalk-Manifest-Check197",
"Angryasdfasdfasdfasdfdfghfggfhjhjerewrtrtyyuivghxvasdaetsdfgxcvbrtysadawfasdfasdewrtwer"))
def test_case_198(self):
self.assertEqual(
"Pass",
run_test.run_test_result(
"Crosswalk-Manifest-Check198",
" asdfasdfasdfasdfdfghfggfhjhjerewrtrtyyuivghxvasdaetsdfgxcvbrtysadawfasdfasdewrtwer"))
def test_case_199(self):
self.assertEqual(
"Pass",
run_test.run_test_result(
"Crosswalk-Manifest-Check199",
" asdfasdfasdfasdfdfghfggfhjhjerewrtrtyyuivghxvasdaetsdfgxcvbrtysadawfasdfasdewrtwer"))
def test_case_200(self):
self.assertEqual(
"Pass",
run_test.run_test_result(
"Crosswalk-Manifest-Check200",
"Angryasdfasdfasdfasdfdfghfggfhjhjerewrtrtyyuivghxvasdaetsdfgxcvbrtysadawfasdfasdewrtwer"))
def test_case_201(self):
self.assertEqual(
"Pass",
run_test.run_test_result(
"Crosswalk-Manifest-Check201",
"123456asdfasdfasdfasdfdfghfggfhjhjerewrtrtyyuivghxvasdaetsdfgxcvbrtysadawfasdfasdewrtwer"))
def test_case_202(self):
self.assertEqual(
"Pass",
run_test.run_test_result(
"Crosswalk-Manifest-Check202",
"_a-basdfasdfasdfasdfdfghfggfhjhjerewrtrtyyuivghxvasdaetsdfgxcvbrtysadawfasdfasdewrtwer"))
def test_case_203(self):
self.assertEqual(
"Pass",
run_test.run_test_result(
"Crosswalk-Manifest-Check203",
"<>asdfasdfasdfasdfdfghfggfhjhjerewrtrtyyuivghxvasdaetsdfgxcvbrtysadawfasdfasdewrtwer"))
def test_case_204(self):
self.assertEqual(
"Pass",
run_test.run_test_result(
"Crosswalk-Manifest-Check204",
".CAPITALasdfasdfasdfasdfdfghfggfhjhjerewrtrtyyuivghxvasdaetsdfgxcvbrtysadawfasdfasdewrtwer"))
def test_case_205(self):
self.assertEqual(
"Pass",
run_test.run_test_result(
"Crosswalk-Manifest-Check205",
"\nasdfasdfasdfasdfdfghfggfhjhjerewrtrtyyuivghxvasdaetsdfgxcvbrtysadawfasdfasdewrtwer"))
def test_case_206(self):
self.assertEqual(
"Pass",
run_test.run_test_result(
"Crosswalk-Manifest-Check206",
"*&^%!@#$%^&*()asdfasdfasdfasdfdfghfggfhjhjerewrtrtyyuivghxvasdaetsdfgxcvbrtysadawfasdfasdewrtwer"))
def test_case_207(self):
self.assertEqual(
"Pass",
run_test.run_test_result(
"Crosswalk-Manifest-Check207",
"+-asdfasdfasdfasdfdfghfggfhjhjerewrtrtyyuivghxvasdaetsdfgxcvbrtysadawfasdfasdewrtwer"))
def test_case_208(self):
self.assertEqual(
"Pass",
run_test.run_test_result(
"Crosswalk-Manifest-Check208",
"'asdfasdfasdfasdfdfghfggfhjhjerewrtrtyyuivghxvasdaetsdfgxcvbrtysadawfasdfasdewrtwer"))
def test_case_209(self):
self.assertEqual(
"Pass",
run_test.run_test_result(
"Crosswalk-Manifest-Check209",
"中文asdfasdfasdfasdfdfghfggfhjhjerewrtrtyyuivghxvasdaetsdfgxcvbrtysadawfasdfasdewrtwer"))
def test_case_210(self):
self.assertEqual(
"Pass",
run_test.run_test_result(
"Crosswalk-Manifest-Check210",
"中文BIRDS."))
def test_case_211(self):
self.assertEqual(
"Pass",
run_test.run_test_result(
"Crosswalk-Manifest-Check211",
"'BIRDS."))
def test_case_212(self):
self.assertEqual(
"Pass",
run_test.run_test_result(
"Crosswalk-Manifest-Check212",
"+-BIRDS."))
def test_case_213(self):
self.assertEqual(
"Pass",
run_test.run_test_result(
"Crosswalk-Manifest-Check213",
"*&^%!@#$%^&*()BIRDS."))
def test_case_214(self):
self.assertEqual(
"Pass",
run_test.run_test_result(
"Crosswalk-Manifest-Check214",
"\nBIRDS."))
def test_case_215(self):
self.assertEqual(
"Pass",
run_test.run_test_result(
"Crosswalk-Manifest-Check215",
".CAPITALBIRDS."))
def test_case_216(self):
self.assertEqual(
"Pass",
run_test.run_test_result(
"Crosswalk-Manifest-Check216",
"<>BIRDS."))
def test_case_217(self):
self.assertEqual(
"Pass",
run_test.run_test_result(
"Crosswalk-Manifest-Check217",
"_a-bBIRDS."))
def test_case_218(self):
self.assertEqual(
"Pass",
run_test.run_test_result(
"Crosswalk-Manifest-Check218",
"123456BIRDS."))
def test_case_219(self):
self.assertEqual(
"Pass",
run_test.run_test_result(
"Crosswalk-Manifest-Check219",
"AngryBIRDS."))
def test_case_220(self):
self.assertEqual(
"Pass",
run_test.run_test_result(
"Crosswalk-Manifest-Check220",
" BIRDS."))
def test_case_221(self):
self.assertEqual(
"Pass",
run_test.run_test_result(
"Crosswalk-Manifest-Check221",
" b "))
def test_case_222(self):
self.assertEqual(
"Pass",
run_test.run_test_result(
"Crosswalk-Manifest-Check222",
"Angryb "))
def test_case_223(self):
self.assertEqual(
"Pass",
run_test.run_test_result(
"Crosswalk-Manifest-Check223",
"123456b "))
def test_case_224(self):
self.assertEqual(
"Pass",
run_test.run_test_result(
"Crosswalk-Manifest-Check224",
"_a-bb "))
def test_case_225(self):
self.assertEqual(
"Pass",
run_test.run_test_result(
"Crosswalk-Manifest-Check225",
"<>b "))
def test_case_226(self):
self.assertEqual(
"Pass",
run_test.run_test_result(
"Crosswalk-Manifest-Check226",
".CAPITALb "))
def test_case_227(self):
self.assertEqual(
"Pass",
run_test.run_test_result(
"Crosswalk-Manifest-Check227",
"\nb "))
def test_case_228(self):
self.assertEqual(
"Pass",
run_test.run_test_result(
"Crosswalk-Manifest-Check228",
"*&^%!@#$%^&*()b "))
def test_case_229(self):
self.assertEqual(
"Pass",
run_test.run_test_result(
"Crosswalk-Manifest-Check229",
"+-b "))
def test_case_230(self):
self.assertEqual(
"Pass",
run_test.run_test_result(
"Crosswalk-Manifest-Check230",
"'b "))
def test_case_231(self):
self.assertEqual(
"Pass",
run_test.run_test_result(
"Crosswalk-Manifest-Check231",
"中文b "))
def test_case_232(self):
self.assertEqual(
"Pass",
run_test.run_test_result(
"Crosswalk-Manifest-Check232",
"中文a b"))
def test_case_233(self):
self.assertEqual(
"Pass",
run_test.run_test_result(
"Crosswalk-Manifest-Check233",
"'a b"))
def test_case_234(self):
self.assertEqual(
"Pass",
run_test.run_test_result(
"Crosswalk-Manifest-Check234",
"+-a b"))
def test_case_235(self):
self.assertEqual(
"Pass",
run_test.run_test_result(
"Crosswalk-Manifest-Check235",
"*&^%!@#$%^&*()a b"))
def test_case_236(self):
self.assertEqual(
"Pass",
run_test.run_test_result(
"Crosswalk-Manifest-Check236",
"\na b"))
def test_case_237(self):
self.assertEqual(
"Pass",
run_test.run_test_result(
"Crosswalk-Manifest-Check237",
".CAPITALa b"))
def test_case_238(self):
self.assertEqual(
"Pass",
run_test.run_test_result(
"Crosswalk-Manifest-Check238",
"<>a b"))
def test_case_239(self):
self.assertEqual(
"Pass",
run_test.run_test_result(
"Crosswalk-Manifest-Check239",
"_a-ba b"))
def test_case_240(self):
self.assertEqual(
"Pass",
run_test.run_test_result(
"Crosswalk-Manifest-Check240",
"123456a b"))
def test_case_241(self):
self.assertEqual(
"Pass",
run_test.run_test_result(
"Crosswalk-Manifest-Check241",
"Angrya b"))
def test_case_242(self):
self.assertEqual(
"Pass",
run_test.run_test_result(
"Crosswalk-Manifest-Check242",
" a b"))
def test_case_243(self):
self.assertEqual(
"Pass",
run_test.run_test_result(
"Crosswalk-Manifest-Check243",
" "))
def test_case_244(self):
self.assertEqual(
"Pass",
run_test.run_test_result(
"Crosswalk-Manifest-Check244",
"Angry "))
def test_case_245(self):
self.assertEqual(
"Pass",
run_test.run_test_result(
"Crosswalk-Manifest-Check245",
"123456 "))
def test_case_246(self):
self.assertEqual(
"Pass",
run_test.run_test_result(
"Crosswalk-Manifest-Check246",
"_a-b "))
def test_case_247(self):
self.assertEqual(
"Pass",
run_test.run_test_result(
"Crosswalk-Manifest-Check247",
"<> "))
def test_case_248(self):
self.assertEqual(
"Pass",
run_test.run_test_result(
"Crosswalk-Manifest-Check248",
".CAPITAL "))
def test_case_249(self):
self.assertEqual(
"Pass",
run_test.run_test_result(
"Crosswalk-Manifest-Check249",
"\n "))
def test_case_250(self):
self.assertEqual(
"Pass",
run_test.run_test_result(
"Crosswalk-Manifest-Check250",
"*&^%!@#$%^&*() "))
def test_case_251(self):
self.assertEqual(
"Pass",
run_test.run_test_result(
"Crosswalk-Manifest-Check251",
"+- "))
def test_case_252(self):
self.assertEqual(
"Pass",
run_test.run_test_result(
"Crosswalk-Manifest-Check252",
"' "))
def test_case_253(self):
self.assertEqual(
"Pass",
run_test.run_test_result(
"Crosswalk-Manifest-Check253",
"中文 "))
def test_case_254(self):
self.assertEqual(
"Pass",
run_test.run_test_result(
"Crosswalk-Manifest-Check254",
"中文birds"))
def test_case_255(self):
self.assertEqual(
"Pass",
run_test.run_test_result(
"Crosswalk-Manifest-Check255",
"'birds"))
def test_case_256(self):
self.assertEqual(
"Pass",
run_test.run_test_result(
"Crosswalk-Manifest-Check256",
"+-birds"))
def test_case_257(self):
self.assertEqual(
"Pass",
run_test.run_test_result(
"Crosswalk-Manifest-Check257",
"*&^%!@#$%^&*()birds"))
def test_case_258(self):
self.assertEqual(
"Pass",
run_test.run_test_result(
"Crosswalk-Manifest-Check258",
"\nbirds"))
def test_case_259(self):
self.assertEqual(
"Pass",
run_test.run_test_result(
"Crosswalk-Manifest-Check259",
".CAPITALbirds"))
def test_case_260(self):
self.assertEqual(
"Pass",
run_test.run_test_result(
"Crosswalk-Manifest-Check260",
"<>birds"))
def test_case_261(self):
self.assertEqual(
"Pass",
run_test.run_test_result(
"Crosswalk-Manifest-Check261",
"_a-bbirds"))
def test_case_262(self):
self.assertEqual(
"Pass",
run_test.run_test_result(
"Crosswalk-Manifest-Check262",
"123456birds"))
def test_case_263(self):
self.assertEqual(
"Pass",
run_test.run_test_result(
"Crosswalk-Manifest-Check263",
"Angrybirds"))
def test_case_264(self):
self.assertEqual(
"Pass",
run_test.run_test_result(
"Crosswalk-Manifest-Check264",
" birds"))
if __name__ == '__main__':
suite1 = unittest.TestLoader().loadTestsFromTestCase(TestCaseUnit)
suite = unittest.TestSuite([suite1])
unittest.TextTestRunner(verbosity=2).run(suite)
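# The 264 methods above differ only in the check index and the input
# string; the same coverage could be generated instead of written out.
# Sketch (same run_test API; test_inputs stands for the list of name
# strings used above and is not defined in this file):
#
# def _make_case(index, name):
#     def test(self):
#         self.assertEqual(
#             "Pass",
#             run_test.run_test_result(
#                 "Crosswalk-Manifest-Check%d" % index, name))
#     return test
#
# for i, name in enumerate(test_inputs, start=1):
#     setattr(TestCaseUnit, "test_case_%d" % i, _make_case(i, name))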
|
qiuzhong/crosswalk-test-suite
|
wrt/wrt-manifest-tizen-tests/test.py
|
Python
|
bsd-3-clause
| 53,807
|
# -*- encoding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2016 Compassion CH (http://www.compassion.ch)
# Releasing children from poverty in Jesus' name
# @author: Emanuel Cino <ecino@compassion.ch>
#
# The licence is in the file __openerp__.py
#
##############################################################################
from openerp import api, models, fields, _
from openerp.exceptions import Warning
class CompassionHold(models.Model):
_name = 'compassion.hold'
name = fields.Char('Name')
hold_id = fields.Char(readonly=True)
child_id = fields.Many2one('compassion.child', 'Child on hold',
readonly=True)
child_name = fields.Char(
'Child on hold', related='child_id.name', readonly=True)
type = fields.Selection([
('Available', 'Available'),
('Change Commitment Hold', 'Change Commitment Hold'),
('Consignment Hold', 'Consignment Hold'),
('Delinquent Mass Cancel Hold', 'Delinquent Mass Cancel Hold'),
('E-Commerce Hold', 'E-Commerce Hold'),
('Inactive', 'Inactive'),
('Ineligible', 'Ineligible'),
('No Money Hold', 'No Money Hold'),
('Reinstatement Hold', 'Reinstatement Hold'),
('Reservation Hold', 'Reservation Hold'),
('Sponsor Cancel Hold', 'Sponsor Cancel Hold'),
('Sponsored', 'Sponsored'),
('Sub Child Hold', 'Sub Child Hold')
])
expiration_date = fields.Datetime()
primary_owner = fields.Char()
secondary_owner = fields.Char()
no_money_yield_rate = fields.Float()
yield_rate = fields.Float()
channel = fields.Char()
source_code = fields.Char()
active = fields.Boolean(default=True, readonly=True)
@api.multi
def release_hold(self):
message_obj = self.env['gmc.message.pool']
action_id = self.env.ref('child_compassion.release_hold').id
self.active = False
message_vals = {
'action_id': action_id,
'object_id': self.id
}
if self.child_id.sponsor_id:
raise Warning(_("Cancel impossible"),
              _("This hold is on a sponsored child!"))
else:
self.child_id.active = False
message_obj.create(message_vals)
@api.model
def check_hold_validity(self):
expired_holds = self.env['compassion.hold'].search([
('expiration_date', '<',
fields.Datetime.now())
])
for hold in expired_holds:
hold.active = False
return True
##########################################################################
# ORM METHODS #
##########################################################################
@api.multi
def write(self, vals):
res = super(CompassionHold, self).write(vals)
notify_vals = ['name', 'primary_owner', 'type', 'mandatory_review',
'expiration_date']
notify = reduce(lambda prev, val: prev or val in vals, notify_vals,
False)
if notify and not self.env.context.get('no_upsert'):
self.update_hold()
return res
@api.multi
def unlink(self):
    # Holds are released (deactivated) rather than hard-deleted.
    self.release_hold()
    return True
##########################################################################
# PUBLIC METHODS #
##########################################################################
def update_hold(self):
message_obj = self.env['gmc.message.pool']
action_id = self.env.ref('child_compassion.create_hold').id
message_vals = {
'action_id': action_id,
'object_id': self.id
}
message_obj.create(message_vals)
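# The reduce(...) in write() computes a boolean OR over the notify fields;
# an equivalent and clearer form is the builtin:
#
#     notify = any(field in vals for field in notify_vals)
#
# which also survives the move to Python 3, where reduce lives in functools.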
|
MickSandoz/compassion-modules
|
child_compassion/models/compassion_hold.py
|
Python
|
agpl-3.0
| 4,052
|
# Sprites for the platform game
import pygame as pg
from settings import *
import random
from random import choice, randrange
vec = pg.math.Vector2
class Spritesheet:
# Class for loading and handling of sprites
def __init__(self, filename):
self.spritesheet = pg.image.load(filename).convert_alpha()
def get_image(self, x, y, width, height):
# Cut a single sprite out of the spritesheet at (x, y) with the given width and height
image = pg.Surface((width,height))
image.blit(self.spritesheet, (0,0), (x, y, width, height))
# Optional: scale the sprite, e.g. up to 54x78 pixels
#image = pg.transform.scale(image, (54, 78))
return image
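# Usage sketch (filename and coordinates are illustrative):
#
#     sheet = Spritesheet('spritesheet_jumper.png')
#     frame = sheet.get_image(198, 193, 27, 39)  # one standing frame
#     frame.set_colorkey(black)                  # 'black' comes from settings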
class Player(pg.sprite.Sprite):
def __init__(self, game):
self._layer = player_layer
self.groups = game.sprites
pg.sprite.Sprite.__init__(self, self.groups)
self.game = game
# Character Animation
self.walking = False
self.jumping = False
self.dashing = False  # i.e. the figure-of-eight run
self.current_frame = 0
self.last_update = 0
self.load_images()
self.image = self.standing_frames[0]
#self.image.set_colorkey(black)
self.rect = self.image.get_rect()
self.rect.center = (width/2, height/2)
self.pos = vec(40, height-50)
self.vel = vec(0,0)
self.acc = vec(0,0)
# Animation
def load_images(self):
# Dashing
self.dashing_frames = [self.game.spritesheet.get_image(558,32,31,35),
self.game.spritesheet.get_image(598, 31, 31, 37),
self.game.spritesheet.get_image(639, 31, 31, 36),
self.game.spritesheet.get_image(681, 32, 30, 35)]
self.dashing_frames_l = []
for frame in self.dashing_frames: # Right movement
frame.set_colorkey(black)
self.dashing_frames_l.append(pg.transform.flip(frame, True, False))
# Standing
self.standing_frames = [self.game.spritesheet.get_image(198,193,27,39)]
        self.standing_frames_r = []
        for frame in self.standing_frames:
            frame.set_colorkey(black)
            self.standing_frames_r.append(pg.transform.flip(frame, True, False))
# Walking frames
self.walk_frames_r = [self.game.spritesheet.get_image(182,35,25,37),
self.game.spritesheet.get_image(212, 35, 26, 37),
self.game.spritesheet.get_image(248, 34, 27, 38),
self.game.spritesheet.get_image(279, 34, 37, 38),
self.game.spritesheet.get_image(323, 34, 32, 38),
self.game.spritesheet.get_image(363, 34, 31, 38),
self.game.spritesheet.get_image(399, 35, 26, 37)]
self.walk_frames_l = []
for frame in self.walk_frames_r:
frame.set_colorkey(black)
            self.walk_frames_l.append(pg.transform.flip(frame, True, False)) # Flip horizontally (True), not vertically (False)
# Jump frames
self.jumpA = [self.game.spritesheet.get_image(233, 200, 24, 32),
self.game.spritesheet.get_image(233, 200, 24, 32),
self.game.spritesheet.get_image(233, 200, 24, 32),
self.game.spritesheet.get_image(172, 153, 32, 29),
self.game.spritesheet.get_image(213, 152, 29, 30),
self.game.spritesheet.get_image(250, 152, 30, 30),
self.game.spritesheet.get_image(290, 152, 30, 30),
self.game.spritesheet.get_image(408, 203, 30, 30),
self.game.spritesheet.get_image(408, 203, 30, 30),
self.game.spritesheet.get_image(408, 203, 30, 30),
self.game.spritesheet.get_image(408, 203, 30, 30),
self.game.spritesheet.get_image(408, 203, 30, 30),
self.game.spritesheet.get_image(408, 203, 30, 30),
self.game.spritesheet.get_image(172, 153, 32, 29),
self.game.spritesheet.get_image(213, 152, 29, 30),
self.game.spritesheet.get_image(250, 152, 30, 30),
                      self.game.spritesheet.get_image(290, 152, 30, 30),
self.game.spritesheet.get_image(267, 206, 29, 27),
self.game.spritesheet.get_image(267, 206, 29, 27),
self.game.spritesheet.get_image(267, 206, 29, 27),
self.game.spritesheet.get_image(267, 206, 29, 27),
self.game.spritesheet.get_image(232, 200, 25, 32),
self.game.spritesheet.get_image(232, 200, 25, 32),
self.game.spritesheet.get_image(232, 200, 25, 32),
self.game.spritesheet.get_image(232, 200, 25, 32),
self.game.spritesheet.get_image(198, 193, 27, 39)]
self.jumpA_l = []
for frame in self.jumpA:
frame.set_colorkey(black)
self.jumpA_l.append(pg.transform.flip(frame, True, False))
self.spring_image1 = self.game.spritesheet.get_image(200, 244, 24, 45)
self.spring_image1 = pg.transform.scale(self.spring_image1, (54, 78))
self.spring_image1.set_colorkey(black)
def jump(self):
# Jump if standing on a platform only
self.rect.x += 2
hits = pg.sprite.spritecollide(self, self.game.platforms, False)
self.rect.x -= 2
if hits:
self.game.jump_sound.play()
self.vel.y = -20
def update(self):
self.animate()
self.acc = vec(0,player_grav) #0.5 for gravity
keys = pg.key.get_pressed()
if keys[pg.K_RIGHT]:
self.acc.x = player_acc
if keys[pg.K_LEFT]:
self.acc.x = -player_acc
# Friction rules
        self.acc.x += self.vel.x * player_friction
# Equations of motion
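        # (a brief note: with one frame as the unit time step, these are the
        # standard constant-acceleration updates v += a and x += v + a/2)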
self.vel += self.acc # For sliding motion
        if abs(self.vel.x) < 0.1:
            self.vel.x = 0
self.pos += self.vel + 0.5 * self.acc
# Wrap around screen
if self.pos.x > width + self.rect.width/2:
self.pos.x = 0 - self.rect.width/2
if self.pos.x < 0 - self.rect.width/2:
self.pos.x = width + self.rect.width/2
self.rect.midbottom = self.pos
def animate(self):
now = pg.time.get_ticks()
if self.vel.y < -19:
self.jumping = True
if self.vel.y >= 0:
self.jumping = False
if self.jumping:
if now - self.last_update > 50: # ms
self.last_update = now
self.current_frame = (self.current_frame + 1) % len(self.jumpA)
bottom = self.rect.bottom
if self.vel.x > 0: # Positive for right direction
self.image = self.jumpA[self.current_frame]
self.image = pg.transform.scale(self.image, (54, 78))
else:
self.image = self.jumpA_l[self.current_frame]
self.image = pg.transform.scale(self.image, (54, 78))
self.rect = self.image.get_rect()
self.rect.bottom = bottom
        # For spring animation
        if -60 < self.vel.y < -21:
if self.vel.x > 0:
self.image = self.spring_image1
else:
self.image = pg.transform.flip(self.spring_image1, True, False)
if abs(self.vel.x) >= 6:
self.dashing = True
else:
self.dashing = False
if self.vel.x != 0:
self.walking = True
else:
self.walking = False
# Running/Dashing animation
if self.dashing:
if now - self.last_update > 100: # ms
self.last_update = now
self.current_frame = (self.current_frame + 1) % len(self.dashing_frames_l)
bottom = self.rect.bottom
if self.vel.x > 0: # Positive for right direction
self.image = self.dashing_frames[self.current_frame]
self.image = pg.transform.scale(self.image, (54, 78))
else:
self.image = self.dashing_frames_l[self.current_frame]
self.image = pg.transform.scale(self.image, (54, 78))
self.rect = self.image.get_rect()
self.rect.bottom = bottom
# Show walk animation
if self.walking:
if now - self.last_update > 100:
self.last_update = now
self.current_frame = (self.current_frame + 1) % len(self.walk_frames_l)
bottom = self.rect.bottom
if self.vel.x > 0: # Positive for right direction
self.image = self.walk_frames_r[self.current_frame]
self.image = pg.transform.scale(self.image, (54, 78))
else:
self.image = self.walk_frames_l[self.current_frame]
self.image = pg.transform.scale(self.image, (54, 78))
self.rect = self.image.get_rect()
self.rect.bottom = bottom
# Show idle animation
if not self.jumping and not self.walking and not self.dashing:
if now - self.last_update > 270: # ms
self.last_update = now
self.current_frame = (self.current_frame + 1) % len(self.standing_frames)
bottom = self.rect.bottom
self.image = self.standing_frames[self.current_frame]
self.image = pg.transform.scale(self.image, (54, 78))
self.rect = self.image.get_rect()
self.rect.bottom = bottom
# Improved collision
self.mask = pg.mask.from_surface(self.image)
# Clouds
class Cloud(pg.sprite.Sprite):
def __init__(self, game):
self._layer = cloud_layer
self.groups = game.sprites, game.clouds
pg.sprite.Sprite.__init__(self, self.groups)
self.game = game
        self.image = self.game.spritesheet.get_image(44, 102, 42, 14)
        scale = randrange(50, 101) / 100
        self.image.set_colorkey(black)
        self.image = pg.transform.scale(self.image, (int(200 * scale), int(50 * scale)))
        self.rect = self.image.get_rect()  # rect must match the scaled image
# Spawn location
        self.rect.x = randrange(width // 2)
self.rect.y = randrange(-500, -50)
def update(self):
if self.rect.top > height * 2:
self.kill()
# Floating Island Scenery
class Floatingisland(pg.sprite.Sprite):
def __init__(self, game):
self._layer = island_layer
self.groups = game.sprites, game.island
pg.sprite.Sprite.__init__(self, self.groups)
self.game = game
        self.image = self.game.spritesheet.get_image(579, 210, 189, 93)
        self.image.set_colorkey(black)
        self.image = pg.transform.scale(self.image, (110, 50))
        self.rect = self.image.get_rect()  # rect must match the scaled image
# Spawn location
self.rect.x = randrange(width - 200)
self.rect.y = randrange(-500, -50)
def update(self):
if self.rect.top > height * 2:
self.kill()
# Class for waterfall scenery
class Waterfall(pg.sprite.Sprite):
def __init__(self, game):
self._layer = waterfall_layer
self.groups = game.sprites, game.waters
pg.sprite.Sprite.__init__(self, self.groups)
self.game = game
self.type = choice(['waterf'])
self.last = 0
self.current_frame = 0
self.load_waterfall_images()
self.image = self.waterfall_images[0]
self.rect = self.image.get_rect()
# Spawn location
self.rect.x = width - 100
self.rect.y = 0
def update(self):
self.water_animate()
def load_waterfall_images(self):
# Waterfall Game scenery
self.waterfall_images = [self.game.spritesheet.get_image(2834, 370, 64, 304),
self.game.spritesheet.get_image(2914, 370, 64, 304),
self.game.spritesheet.get_image(2989, 370, 64, 304),
self.game.spritesheet.get_image(3066, 370, 64, 304),
self.game.spritesheet.get_image(3138, 370, 64, 304),
self.game.spritesheet.get_image(3211, 370, 64, 304),
self.game.spritesheet.get_image(3299, 370, 64, 304),
self.game.spritesheet.get_image(3373, 370, 64, 304)
]
for frame in self.waterfall_images:
frame.set_colorkey(black)
#self.waterfall_images[0] = pg.transform.scale(self.waterfall_images[0], (64, height))
def water_animate(self):
now = pg.time.get_ticks()
if now - self.last > 50: # ms
self.last = now
self.current_frame = (self.current_frame + 1) % len(self.waterfall_images)
self.image = self.waterfall_images[self.current_frame]
self.image = pg.transform.scale(self.image, (64, height))
class Platform(pg.sprite.Sprite):
def __init__(self, game, x, y):
self._layer = platform_layer
self.groups = game.sprites, game.platforms
pg.sprite.Sprite.__init__(self, self.groups)
self.game = game
images = [self.game.spritesheet.get_image(56,310,65,30)
]
self.image = choice(images)
self.image.set_colorkey(black)
self.image = pg.transform.scale(self.image, (200, 50))
self.rect = self.image.get_rect()
self.rect.x = x
self.rect.y = y
if randrange(100) < spring_spawn_freq:
Spring(self.game, self)
if randrange(50) < ring_spawn_freq:
Ringg(self.game, self)
class Spring(pg.sprite.Sprite): #
def __init__(self, game, plat): # Platform to sit on
self._layer = spring_layer
self.groups = game.sprites, game.powerups
pg.sprite.Sprite.__init__(self, self.groups)
self.game = game
self.plat = plat # The platform
self.type = choice(['spring'])
self.image = self.game.spritesheet.get_image(632, 156, 28, 16)
self.image = pg.transform.scale(self.image, (56, 32))
self.image.set_colorkey(black)
self.rect = self.image.get_rect()
self.rect.centerx = self.plat.rect.centerx # Place at center of platform
self.rect.bottom = self.plat.rect.top + 3
def update(self):
self.rect.bottom = self.plat.rect.top + 3
# For powerup sprites
if not self.game.platforms.has(self.plat):
self.kill()
class Ringg(pg.sprite.Sprite): #
def __init__(self, game, plat): # Platform to sit on
self._layer = ring_layer
self.groups = game.sprites, game.powerups1
pg.sprite.Sprite.__init__(self, self.groups)
self.game = game
self.load_ring_images()
self.plat = plat # The platform
self.current_frame = 0
self.last = 0
self.type = choice(['ring'])
self.image = self.ring_images[0]
self.image = pg.transform.scale(self.image, (27, 27))
self.image.set_colorkey(black)
self.rect = self.image.get_rect()
self.rect.centerx = self.plat.rect.centerx # Place at center of platform
self.rect.bottom = self.plat.rect.top + 3
def update(self):
self.animate()
self.rect.bottom = self.plat.rect.top + 3
if not self.game.platforms.has(self.plat):
self.kill()
#If platform has a spring, kill
def load_ring_images(self):
self.ring_images = [self.game.spritesheet.get_image(459, 269, 16, 16),
self.game.spritesheet.get_image(479, 269, 12, 16),
self.game.spritesheet.get_image(494, 269, 6, 16),
self.game.spritesheet.get_image(512, 269, 12, 16),
self.game.spritesheet.get_image(528, 269, 16, 16)]
for frame in self.ring_images:
frame.set_colorkey(black)
def animate(self):
now = pg.time.get_ticks()
if now - self.last > 50: # ms
self.last = now
self.current_frame = (self.current_frame + 1) % len(self.ring_images)
self.image = self.ring_images[self.current_frame]
self.image = pg.transform.scale(self.image, (27, 27))
# I.e. the badniks / enemies
class Mob(pg.sprite.Sprite): #
def __init__(self, game): # Platform to sit on
self._layer = mob_layer
self.groups = game.sprites, game.mobs
pg.sprite.Sprite.__init__(self, self.groups)
self.game = game
        self.image = self.game.spritesheet.get_image(256, 484, 23, 37)
self.image = pg.transform.scale(self.image, (54, 78))
self.image.set_colorkey(black)
self.rect = self.image.get_rect()
self.rect.centerx = choice([-100, width + 100])
        self.vx = randrange(1, 4)
        if self.rect.centerx > width:
            self.vx *= -1
        self.rect.y = randrange(height // 2)
self.vy = 0
self.dy = 0.5
def update(self):
self.rect.x += self.vx
self.vy += self.dy
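        # Reverse the vertical drift once it exceeds +/-3 px per frame, so the
        # mob bobs up and down while crossing the screen.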
if self.vy > 3 or self.vy < -3:
self.dy *= -1
center = self.rect.center
self.rect = self.image.get_rect()
self.mask = pg.mask.from_surface(self.image)
self.rect.center = center
self.rect.y += self.vy
        if self.rect.left > width + 100 or self.rect.right < -100:
self.kill()
class Startscreen(pg.sprite.Sprite):
    def __init__(self, game, x, y):
self._layer = start_layer
self.groups = game.sprites, game.startGui
pg.sprite.Sprite.__init__(self, self.groups)
self.game = game
self.load_images()
self.image = self.startImage
self.image.set_colorkey(black)
self.rect = self.image.get_rect()
self.rect.x = x
self.rect.y = y
#self.pos = vec(40, height - 50)
|
coderfourfun/Sonic-Classic-Jump
|
sprites.py
|
Python
|
mit
| 18,893
|
# encoding: utf-8
# 2011 © Bruno Chareyre <bruno.chareyre@hmg.inpg.fr>
import yade,math,os,sys
scriptsToRun=os.listdir(checksPath)
resultStatus = 0
nFailed=0
skipScripts = ['checkList.py']
for script in scriptsToRun:
if (script[len(script)-3:]==".py" and script not in skipScripts):
try:
print "###################################"
print "running: ",script
execfile(checksPath+"/"+script)
if (resultStatus>nFailed):
print "Status: FAILURE!!!"
nFailed=resultStatus
else:
print "Status: success"
print "___________________________________"
except:
print script," failure"
O.reset()
elif (script in skipScripts):
print "###################################"
print "Skipping %s, because it is in SkipScripts"%script
if (resultStatus>0):
	print resultStatus, "tests failed"
sys.exit(1)
else:
sys.exit(0)
|
anna-effeindzourou/trunk
|
scripts/checks-and-tests/checks/checkList.py
|
Python
|
gpl-2.0
| 859
|
# -*- coding: utf-8 -*-
"""Tests for xmlreader module."""
#
# (C) Pywikibot team, 2009-2014
#
# Distributed under the terms of the MIT license.
#
from __future__ import absolute_import, unicode_literals
__version__ = '$Id$'
from pywikibot import xmlreader
from tests import join_xml_data_path
from tests.aspects import unittest, TestCase
class XmlReaderTestCase(TestCase):
"""XML Reader test cases."""
net = False
def _get_entries(self, filename, **kwargs):
"""Get all entries via XmlDump."""
entries = [r for r in
xmlreader.XmlDump(join_xml_data_path(filename),
**kwargs).parse()]
return entries
class ExportDotThreeTestCase(XmlReaderTestCase):
"""XML export version 0.3 tests."""
def test_XmlDumpAllRevs(self):
"""Test loading all revisions."""
pages = self._get_entries('article-pear.xml', allrevisions=True)
self.assertEqual(4, len(pages))
self.assertEqual(u"Automated conversion", pages[0].comment)
self.assertEqual(u"Pear", pages[0].title)
self.assertEqual(u"24278", pages[0].id)
self.assertTrue(pages[0].text.startswith('Pears are [[tree]]s of'))
self.assertEqual(u"Quercusrobur", pages[1].username)
self.assertEqual(u"Pear", pages[0].title)
def test_XmlDumpFirstRev(self):
"""Test loading the first revision."""
pages = self._get_entries("article-pear.xml", allrevisions=False)
self.assertEqual(1, len(pages))
self.assertEqual(u"Automated conversion", pages[0].comment)
self.assertEqual(u"Pear", pages[0].title)
self.assertEqual(u"24278", pages[0].id)
self.assertTrue(pages[0].text.startswith('Pears are [[tree]]s of'))
self.assertTrue(not pages[0].isredirect)
def test_XmlDumpRedirect(self):
"""Test XmlDump correctly parsing whether a page is a redirect."""
        pages = self._get_entries('article-pyrus.xml')
self.assertTrue(pages[0].isredirect)
def _compare(self, previous, variant, all_revisions):
"""Compare the tested variant with the previous (if not None)."""
entries = self._get_entries('article-pyrus' + variant,
allrevisions=all_revisions)
result = [entry.__dict__ for entry in entries]
if previous:
self.assertEqual(previous, result)
return result
def _compare_variants(self, all_revisions):
"""Compare the different XML file variants."""
previous = None
previous = self._compare(previous, '.xml', all_revisions)
previous = self._compare(previous, '-utf16.xml', all_revisions)
previous = self._compare(previous, '.xml.bz2', all_revisions)
previous = self._compare(previous, '-utf16.xml.bz2', all_revisions)
def test_XmlDump_compare_all(self):
"""Compare the different XML files using all revisions."""
self._compare_variants(True)
def test_XmlDump_compare_single(self):
"""Compare the different XML files using only a single revision."""
self._compare_variants(False)
class ExportDotTenTestCase(XmlReaderTestCase):
"""XML export version 0.10 tests."""
def test_pair(self):
"""Test reading the main page/user talk page pair file."""
entries = self._get_entries('pair-0.10.xml', allrevisions=True)
self.assertEqual(4, len(entries))
self.assertTrue(all(entry.username == 'Carlossuarez46'
for entry in entries))
self.assertTrue(all(entry.isredirect is False for entry in entries))
articles = entries[0:2]
talks = entries[2:4]
self.assertEqual(2, len(articles))
self.assertTrue(all(entry.id == "19252820" for entry in articles))
self.assertTrue(all(entry.title == u"Çullu, Agdam"
for entry in articles))
self.assertTrue(all(u'Çullu, Quzanlı' in entry.text
for entry in articles))
self.assertEqual(articles[0].text, u'#REDIRECT [[Çullu, Quzanlı]]')
self.assertEqual(2, len(talks))
self.assertTrue(all(entry.id == "19252824" for entry in talks))
self.assertTrue(all(entry.title == u"Talk:Çullu, Agdam"
for entry in talks))
self.assertEqual(talks[1].text, '{{DisambigProject}}')
self.assertEqual(talks[1].comment, 'proj')
def test_edit_summary_decoding(self):
"""Test edit summaries are decoded."""
entries = self._get_entries('pair-0.10.xml', allrevisions=True)
articles = [entry for entry in entries if entry.ns == "0"]
# It does not decode the edit summary
self.assertEqual(articles[0].comment,
u'moved [[Çullu, Agdam]] to [[Çullu, Quzanlı]]: dab')
if __name__ == '__main__':
try:
unittest.main()
except SystemExit:
pass
|
darthbhyrava/pywikibot-local
|
tests/xmlreader_tests.py
|
Python
|
mit
| 5,108
|
import ArtusConfigBase as base
import mc
def config():
conf = mc.config()
l = []
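    # Collect for deletion every pipeline except the 'all*' ones containing
    # 'CHS'; only those survive below.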
for pipeline in conf['Pipelines']:
if not pipeline.startswith('all'):
l.append(pipeline)
elif 'CHS' not in pipeline:
l.append(pipeline)
for pipeline in l:
del conf['Pipelines'][pipeline]
for pipeline in conf['Pipelines']:
conf['Pipelines'][pipeline]['Consumer'] = [
#"muonntuple",
"jetntuple",
]
return conf
|
dhaitz/CalibFW
|
cfg/artus/mc_noc.py
|
Python
|
gpl-2.0
| 508
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2016-12-19 13:06
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('pictures', '0009_auto_20161219_1306'),
('education', '0002_auto_20161005_1514'),
]
operations = [
migrations.RemoveField(
model_name='classday',
name='faculty',
),
migrations.RemoveField(
model_name='classunit',
name='days',
),
migrations.RemoveField(
model_name='classunit',
name='faculty',
),
migrations.RemoveField(
model_name='enroll',
name='group',
),
migrations.RemoveField(
model_name='enroll',
name='student',
),
migrations.RemoveField(
model_name='enroll',
name='subject',
),
migrations.RemoveField(
model_name='group',
name='semester',
),
migrations.RemoveField(
model_name='group',
name='subject',
),
migrations.DeleteModel(
name='NonLectiveDay',
),
migrations.RemoveField(
model_name='timetableentry',
name='weekly_timetable_entry',
),
migrations.RemoveField(
model_name='weeklytimetableentry',
name='classroom',
),
migrations.RemoveField(
model_name='weeklytimetableentry',
name='day',
),
migrations.RemoveField(
model_name='weeklytimetableentry',
name='group',
),
migrations.RemoveField(
model_name='weeklytimetableentry',
name='subject',
),
migrations.RemoveField(
model_name='weeklytimetableentry',
name='unit',
),
migrations.AddField(
model_name='subject',
name='group',
field=models.CharField(blank=True, max_length=10, null=True),
),
migrations.AddField(
model_name='subject',
name='semester',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='education.Semester'),
),
migrations.DeleteModel(
name='ClassDay',
),
migrations.DeleteModel(
name='ClassUnit',
),
migrations.DeleteModel(
name='Enroll',
),
migrations.DeleteModel(
name='Group',
),
migrations.DeleteModel(
name='TimetableEntry',
),
migrations.DeleteModel(
name='WeeklyTimetableEntry',
),
]
|
photoboard/photoboard-django
|
education/migrations/0003_auto_20161219_1306.py
|
Python
|
mit
| 2,856
|
# Copyright (c) Byron Galbraith and Unlock contributors.
# All rights reserved.
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of Unlock nor the names of its contributors may be used
# to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import numpy as np
from unlock.state.state import UnlockState, TrialState
class VepTrainerState(UnlockState):
NextTarget = 1
PrevTarget = 2
TrialStart = 1
TrialEnd = 2
def __init__(self, stimuli, targets, n_trials=None, trial_sequence=None):
super(VepTrainerState, self).__init__()
self.stimuli = stimuli
self.targets = targets
self.target_idx = 0
self.n_trials = n_trials
self.trial_count = 0
if trial_sequence is None or trial_sequence == 'manual':
self.trial_sequence = None
elif trial_sequence == 'random':
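            # Build a shuffled trial order in which every target appears
            # exactly n_trials / len(targets) times (hence the divisibility
            # assert below).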
assert n_trials % len(self.targets) == 0
            self.trial_sequence = np.random.permutation(np.repeat(
                np.arange(len(self.targets)), n_trials // len(self.targets)))
else:
self.trial_sequence = trial_sequence
self.state = None
self.state_change = False
def get_state(self):
if self.state_change:
self.state_change = False
return self.state
def set_state(self, state):
self.state = state
self.state_change = True
def trial_start(self):
self.set_state(VepTrainerState.TrialStart)
self.stimuli.start()
def trial_stop(self):
self.set_state(VepTrainerState.TrialEnd)
self.stimuli.stop()
for stimulus in self.stimuli.stimuli:
stimulus.state = None
def process_command(self, command):
if not self.stimuli.state.is_stopped():
if self.trial_count == 0:
# this is a hack to get around the current setup where the
# stimuli starts immediately
self.trial_stop()
elif self.stimuli.state.last_change == TrialState.TrialExpiry:
# there is an occasional delay apparently that can happen when
# using actual devices which causes this state to be missed
# i.e. it goes to rest, then the next rest state, resulting in
# an Unchanged response, before this check happens. A better
# method would preserve the value until it was queried.
self.trial_stop()
else:
return
if command.selection:
self.trial_count += 1
if self.trial_count <= self.n_trials:
self.handle_selection()
self.trial_start()
if command.decision is not None:
self.handle_decision(command.decision)
def handle_decision(self, decision):
if decision == VepTrainerState.NextTarget:
self.target_idx = (self.target_idx + 1) % len(self.targets)
self.update_stimulus(self.targets[self.target_idx])
elif decision == VepTrainerState.PrevTarget:
self.target_idx = (self.target_idx - 1) % len(self.targets)
self.update_stimulus(self.targets[self.target_idx])
def handle_selection(self):
if self.trial_sequence is not None:
self.target_idx = self.trial_sequence[self.trial_count-1]
self.update_stimulus(self.targets[self.target_idx])
def update_stimulus(self, target):
pass
class MsequenceTrainerState(VepTrainerState):
def update_stimulus(self, sequence):
self.stimuli.stimuli[0].seq_state.sequence = sequence
class SsvepTrainerState(VepTrainerState):
def update_stimulus(self, frequency):
self.stimuli.stimuli[0].time_state.duration = 1 / (2 * frequency)
|
NeuralProsthesisLab/unlock
|
unlock/state/trainer_state.py
|
Python
|
bsd-3-clause
| 5,106
|
from django.core.management.base import BaseCommand, CommandError
from django.utils.translation import ugettext as _
from django.conf import settings
from django.utils.crypto import get_random_string
from optparse import make_option
import shutil
import os.path
class Command(BaseCommand):
help = _('Copies settings.py file from local project template to current project')
#NOTE django > 1.8 uses argparse instead of optparse module,
#so "You are encouraged to exclusively use **options for new commands."
#https://docs.djangoproject.com/en/1.9/howto/custom-management-commands/
option_list = BaseCommand.option_list + (
make_option('--project_name',
action='store',
dest='project_name',
default='cyclope_project',
help=_('Project folder name, as generated by cyclopeproject command')
),
)
def handle(self, *args, **options):
# TODO(NumericA) Django > 1.8 includes BASE_DIR setting
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
        CYCLOPE_DIR = os.path.join('/', *BASE_DIR.split('/')[:-3])
TEMPLATE_PATH = os.path.join(CYCLOPE_DIR, 'cyclope/conf/project_template/settings.py')
PROJECT_PATH = settings.CYCLOPE_PROJECT_PATH
# COPY
shutil.copy(TEMPLATE_PATH, PROJECT_PATH)
# PARSE TEMPLATE VARS
settings_file = os.path.join(PROJECT_PATH, 'settings.py')
project_name = options['project_name']
secret_key = self.get_random_secret_key()
self.parse_settings_vars(settings_file, project_name, secret_key)
def parse_settings_vars(self, settings_file, project_name, secret_key):
f = open(settings_file, 'r')
filedata = f.read()
f.close()
newdata = filedata.replace("{{ project_name }}", project_name)
newdata = newdata.replace("{{ secret_key }}", secret_key)
f = open(settings_file, 'w')
f.write(newdata)
f.close()
def get_random_secret_key(self):
# TODO(NumericA) Django > 1.8 from django.core.management.utils import get_random_secret_key
chars = 'abcdefghijklmnopqrstuvwxyz0123456789!@#$%^&*(-_=+)'
return get_random_string(50, chars)
|
CodigoSur/cyclope
|
cyclope/management/commands/local_settings.py
|
Python
|
gpl-3.0
| 2,248
|
"""
general set discretization
--------------------------
Discretization set. Explicit relations between elements and their groups.
"""
import numpy as np
from scipy.sparse import issparse, coo_matrix
from ..spatialdiscretizer import BaseSpatialDiscretizor
class SetDiscretization(BaseSpatialDiscretizor):
"""Set discretization is mapping between a non-metric space and another
topological space.
"""
format_ = 'explicit'
def __init__(self, membership, regionlocs=None, metric_f=None):
"""Constructor function.
Parameters
----------
membership: numpy.ndarray, list of lists, scipy.sparse or list of dicts
the membership information. If the assignation of each element to
a collection of elements is unique the membership can be
represented as a numpy array, else it will be represented as a list
of lists.
            It is stored as a one-dimensional array if it is not multiple, or
            as a scipy.sparse matrix if it is multiple.
"""
## Class parameters initialization
self.multiple, self.metric, self.n_dim = None, None, None
self._initialization()
self.metric_f = metric_f
self.metric = False if metric_f is None else True
## Format membership
self._membership, (self._n, self.multiple, self._weighted,
self.regionlocs, self.regions_id) =\
format_membership(membership)
self.regions_id = self.regions_id.astype(int)
if self.regionlocs is None:
self.regionlocs = np.arange(self._n)
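        # format_membership returns a 'unique' flag; invert it into 'multiple'.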
self.multiple = not self.multiple
@property
def borders(self):
return self._membership
def _map_loc2regionid(self, elements):
"""Discretize locs returning their region_id.
Parameters
----------
elements: optional
the locations for which we want to obtain their region given that
discretization.
Returns
-------
regions: numpy.ndarray
the region_id of each locs for this discretization.
----
if _weighted:
regions: numpy.ndarray or list or lists
the region_id of each locs for this discretization.
weights: numpy.ndarray or list of lists
the weights of membership to each region.
"""
        if '__len__' not in dir(elements):
elements = [elements]
inttypes = [int, np.int32, np.int64]
ifindex = all([type(e) in inttypes for e in elements])
if not ifindex and self.regionlocs is None:
raise TypeError("Index needed.")
## Compute regions
        regions, weights = [], []
for i in xrange(len(elements)):
## Getting indice of element
if ifindex:
j = elements[i]
else:
j = find_idx_in_array(elements[i], self.regionlocs)
## Getting relations
            if j is not None and 0 <= j < self._n:
regions_i, weights_i =\
indexing_rows(j, self._membership, self.regions_id,
self.multiple)
regions.append(regions_i)
weights.append(weights_i)
else:
regions.append(np.array([-1]))
weights.append([0])
## Formatting output
if not self.multiple:
regions, weights = np.array(regions), np.array(weights)
if self._weighted:
return regions, weights
else:
return regions
def _map_regionid2regionlocs(self, regions=None):
"""Function which maps the regions ID to their elements.
"""
regions = self.regions_id if regions is None else regions
if type(regions) == int:
regions = np.array([regions])
elements, weights = [], []
for i in xrange(len(regions)):
## Getting col indice
idx = np.where(regions[i] == self.regions_id)[0]
if self.multiple:
j_col = idx[0]
else:
j_col = regions[i]
## Getting relations
if len(idx):
elements_i, weights_i =\
indexing_cols(j_col, self._membership,
self.regionlocs, self.multiple)
elements.append(elements_i)
weights.append(weights_i)
else:
elements.append(np.array([-1]))
weights.append([0])
if self._weighted:
return elements, weights
else:
return elements
def _compute_limits(self, region_id=None):
"Build the limits of the region discretized."
if region_id is None:
self.limits = np.array(0)
else:
return region_id
def _compute_contiguity_geom(self, region_id, params={}):
"""Compute geometric contiguity."""
        if self.metric and self.metric_f is not None:
            return self.metric_f(self.regions_id, **params)
        raise Exception("No metric defined.")
def format_membership(membership):
"""Format membership to fit it into the set discretizor standart."""
if type(membership) == np.ndarray:
_membership = membership
n_elements = membership.shape[0]
_unique = True
_weighted = False
collections = None
collections_id = np.unique(membership)
elif type(membership) == list:
collections = None
n_elements = len(membership)
# Formatting to all list
types = np.array([type(e) for e in membership])
op1 = np.any([t in [np.ndarray, list] for t in types])
op2 = np.all([t == dict for t in types])
op30 = np.all([t == list for t in types])
op31 = np.all([t == np.ndarray for t in types])
op3 = op30 or op31
if op1:
for i in xrange(len(membership)):
if type(membership[i]) not in [np.ndarray, list]:
membership[i] = [membership[i]]
op3 = True
types = np.array([type(e) for e in membership])
op31 = np.all([t == np.ndarray for t in types])
# Computing if dicts
if op2:
_membership = membership
_unique = False
_weighted = True
n_elements = len(membership)
aux = [membership[i].keys() for i in xrange(n_elements)]
aux = np.hstack(aux)
collections_id = np.unique(aux)
# Computing if lists
if op3:
if op31:
membership = [list(m) for m in membership]
length = np.array([len(e) for e in membership])
if np.all(length == 1):
membership = np.array(membership)
_membership = membership
n_elements = membership.shape[0]
_unique = True
_weighted = False
collections_id = np.unique(membership)
else:
_membership = membership
n_elements = len(membership)
_unique = False
_weighted = False
aux = np.hstack(membership)
collections_id = np.unique(aux)
elif issparse(membership):
collections = None
_membership = membership
n_elements = membership.shape[0]
collections_id = np.arange(membership.shape[1])
_weighted = np.any(membership.data != 1)
_unique = np.all(membership.sum(1) == 1)
## Transform to sparse
out = n_elements, _unique, _weighted, collections, collections_id
if not _unique:
_membership, _ = to_sparse(_membership, out)
return _membership, out
def to_sparse(_membership, out):
"Return a sparse matrix object."
n_elements, _unique, _weighted, collections, collections_id = out
sh = n_elements, len(collections_id)
aux_map = dict(zip(collections_id, range(sh[1])))
if issparse(_membership):
return _membership, (range(n_elements), collections_id)
if _unique:
_membership = np.array(_membership).ravel()
matrix = np.array([aux_map[e] for e in _membership])
matrix = matrix.astype(int)
matrix = coo_matrix((np.ones(sh[0]), (range(sh[0]), matrix)),
shape=sh)
elif not _weighted:
indices = []
for i in xrange(sh[0]):
for j in range(len(_membership[i])):
indices.append((i, aux_map[_membership[i][j]]))
indices = np.array(indices)[:, 0], np.array(indices)[:, 1]
matrix = coo_matrix((np.ones(len(indices[0])), indices), shape=sh)
elif _weighted:
indices, data = [], []
for i in xrange(sh[0]):
for j in _membership[i]:
indices.append((i, aux_map[j]))
data.append(_membership[i][j])
indices = np.array(indices)[:, 0], np.array(indices)[:, 1]
matrix = coo_matrix((np.array(data), indices), shape=sh)
return matrix, (range(n_elements), collections_id)
def find_idx_in_array(element, elements):
"Return the index of the first coincidence in an iterable."
for i in xrange(len(elements)):
if element == elements[i]:
return i
def indexing_rows(j_row, _membership, regions_id, multiple):
"""Indexing rows from the membership relations."""
if multiple:
relations_i = _membership.getrow(j_row).A.ravel()
idxs = np.where(relations_i)[0]
regions_i, weights_i = regions_id[idxs], relations_i[idxs]
else:
regions_i = _membership[j_row]
weights_i = np.array([1.])
return regions_i, weights_i
def indexing_cols(j_col, _membership, regionlocs, multiple):
"""Indexing cols from the membership relations."""
## Getting elements and weights
if multiple:
relations_i = _membership.getcol(j_col).A
idxs = relations_i.nonzero()
idxs = idxs[0] if len(idxs) else idxs
elements_i, weights_i = idxs, relations_i[idxs]
else:
idxs = np.where(_membership == j_col)[0]
#idxs = idxs[0] if len(idxs) else idxs
elements_i, weights_i = idxs, np.ones(len(idxs))
## Formatting to regionlocs
if regionlocs is not None:
elements_i = regionlocs[idxs]
return elements_i, weights_i
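# Illustrative sketch, not part of the original module: how the membership
# shapes listed in SetDiscretization.__init__ normalize through
# format_membership. The expected outputs follow from the branches above.
if __name__ == '__main__':
    _, (n, unique, weighted, _, ids) = format_membership(np.array([0, 1, 0, 2]))
    print n, unique, weighted, ids  # 4 True False [0 1 2]
    _, (n, unique, weighted, _, ids) = format_membership([[0, 1], [2], [1]])
    print n, unique, weighted, ids  # 3 False False [0 1 2]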
|
tgquintela/pySpatialTools
|
pySpatialTools/Discretization/Discretization_set/general_set_discretization.py
|
Python
|
mit
| 10,575
|
#
# Copyright 2016 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import partial
import logbook
import pandas as pd
from pandas.tslib import normalize_date
from six import string_types
from sqlalchemy import create_engine
from zipline.assets import AssetDBWriter, AssetFinder
from zipline.assets.continuous_futures import CHAIN_PREDICATES
from zipline.data.loader import load_market_data
from zipline.utils.calendars import get_calendar
from zipline.utils.memoize import remember_last
log = logbook.Logger('Trading')
DEFAULT_CAPITAL_BASE = 1e5
class TradingEnvironment(object):
"""
The financial simulations in zipline depend on information
about the benchmark index and the risk free rates of return.
The benchmark index defines the benchmark returns used in
the calculation of performance metrics such as alpha/beta. Many
components, including risk, performance, transforms, and
batch_transforms, need access to a calendar of trading days and
market hours. The TradingEnvironment maintains two time keeping
facilities:
- a DatetimeIndex of trading days for calendar calculations
- a timezone name, which should be local to the exchange
hosting the benchmark index. All dates are normalized to UTC
for serialization and storage, and the timezone is used to
ensure proper rollover through daylight savings and so on.
User code will not normally need to use TradingEnvironment
directly. If you are extending zipline's core financial
components and need to use the environment, you must import the module and
build a new TradingEnvironment object, then pass that TradingEnvironment as
the 'env' arg to your TradingAlgorithm.
Parameters
----------
load : callable, optional
The function that returns benchmark returns and treasury curves.
The treasury curves are expected to be a DataFrame with an index of
dates and columns of the curve names, e.g. '10year', '1month', etc.
bm_symbol : str, optional
The benchmark symbol
exchange_tz : tz-coercable, optional
The timezone of the exchange.
trading_calendar : TradingCalendar, optional
The trading calendar to work with in this environment.
asset_db_path : str or sa.engine.Engine, optional
The path to the assets db or sqlalchemy Engine object to use to
construct an AssetFinder.
"""
# Token used as a substitute for pickling objects that contain a
# reference to a TradingEnvironment
PERSISTENT_TOKEN = "<TradingEnvironment>"
def __init__(
self,
load=None,
bm_symbol='SPY',
exchange_tz="US/Eastern",
trading_calendar=None,
asset_db_path=':memory:',
future_chain_predicates=CHAIN_PREDICATES,
local_benchmark=None,
environ=None,
):
self.bm_symbol = bm_symbol
self.local_benchmark = local_benchmark
self.environ = environ
if not load:
load = partial(load_market_data, local_benchmark=self.local_benchmark, environ=self.environ)
self.trading_calendar = trading_calendar
if not self.trading_calendar:
self.trading_calendar = get_calendar("NYSE")
self.benchmark_returns, self.treasury_curves = load(
self.trading_calendar.day,
self.trading_calendar.schedule.index,
self.bm_symbol,
)
self.exchange_tz = exchange_tz
if isinstance(asset_db_path, string_types):
asset_db_path = 'sqlite:///' + asset_db_path
self.engine = engine = create_engine(asset_db_path)
else:
self.engine = engine = asset_db_path
if engine is not None:
AssetDBWriter(engine).init_db()
self.asset_finder = AssetFinder(
engine,
future_chain_predicates=future_chain_predicates)
else:
self.asset_finder = None
def update_local_bench(self, local_benchmark):
load = partial(load_market_data, local_benchmark=local_benchmark, environ=self.environ)
self.benchmark_returns, self.treasury_curves = load(
self.trading_calendar.day,
self.trading_calendar.schedule.index,
self.bm_symbol,
)
def write_data(self, **kwargs):
"""Write data into the asset_db.
Parameters
----------
**kwargs
Forwarded to AssetDBWriter.write
"""
AssetDBWriter(self.engine).write(**kwargs)
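# Illustrative sketch, not from the original file: building an environment by
# hand and passing it to an algorithm, as the class docstring above describes.
# The algorithm class name is hypothetical.
#
#   env = TradingEnvironment(bm_symbol='SPY', exchange_tz='US/Eastern')
#   algo = SomeTradingAlgorithm(env=env)  # hypothetical TradingAlgorithm subclass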
class SimulationParameters(object):
def __init__(self, start_session, end_session,
trading_calendar,
capital_base=DEFAULT_CAPITAL_BASE,
emission_rate='daily',
data_frequency='daily',
arena='backtest'):
assert type(start_session) == pd.Timestamp
assert type(end_session) == pd.Timestamp
assert trading_calendar is not None, \
"Must pass in trading calendar!"
assert start_session <= end_session, \
"Period start falls after period end."
assert start_session <= trading_calendar.last_trading_session, \
"Period start falls after the last known trading day."
assert end_session >= trading_calendar.first_trading_session, \
"Period end falls before the first known trading day."
# chop off any minutes or hours on the given start and end dates,
# as we only support session labels here (and we represent session
# labels as midnight UTC).
self._start_session = normalize_date(start_session)
self._end_session = normalize_date(end_session)
self._capital_base = capital_base
self._emission_rate = emission_rate
self._data_frequency = data_frequency
# copied to algorithm's environment for runtime access
self._arena = arena
self._trading_calendar = trading_calendar
if not trading_calendar.is_session(self._start_session):
# if the start date is not a valid session in this calendar,
# push it forward to the first valid session
self._start_session = trading_calendar.minute_to_session_label(
self._start_session
)
if not trading_calendar.is_session(self._end_session):
# if the end date is not a valid session in this calendar,
# pull it backward to the last valid session before the given
# end date.
self._end_session = trading_calendar.minute_to_session_label(
self._end_session, direction="previous"
)
self._first_open = trading_calendar.open_and_close_for_session(
self._start_session
)[0]
self._last_close = trading_calendar.open_and_close_for_session(
self._end_session
)[1]
@property
def capital_base(self):
return self._capital_base
@property
def emission_rate(self):
return self._emission_rate
@property
def data_frequency(self):
return self._data_frequency
@data_frequency.setter
def data_frequency(self, val):
self._data_frequency = val
@property
def arena(self):
return self._arena
@arena.setter
def arena(self, val):
self._arena = val
@property
def start_session(self):
return self._start_session
@property
def end_session(self):
return self._end_session
@property
def first_open(self):
return self._first_open
@property
def last_close(self):
return self._last_close
@property
@remember_last
def sessions(self):
return self._trading_calendar.sessions_in_range(
self.start_session,
self.end_session
)
def create_new(self, start_session, end_session):
return SimulationParameters(
start_session,
end_session,
self._trading_calendar,
capital_base=self.capital_base,
emission_rate=self.emission_rate,
data_frequency=self.data_frequency,
arena=self.arena
)
def __repr__(self):
return """
{class_name}(
start_session={start_session},
end_session={end_session},
capital_base={capital_base},
data_frequency={data_frequency},
emission_rate={emission_rate},
first_open={first_open},
last_close={last_close})\
""".format(class_name=self.__class__.__name__,
start_session=self.start_session,
end_session=self.end_session,
capital_base=self.capital_base,
data_frequency=self.data_frequency,
emission_rate=self.emission_rate,
first_open=self.first_open,
last_close=self.last_close)
def noop_load(*args, **kwargs):
"""
A method that can be substituted in as the load method in a
TradingEnvironment to prevent it from loading benchmarks.
Accepts any arguments, but returns only a tuple of Nones regardless
of input.
"""
return None, None
|
florentchandelier/zipline
|
zipline/finance/trading.py
|
Python
|
apache-2.0
| 9,694
|
#!/usr/bin/env python
# ----------------------------------------------------------------------------
# pyglet
# Copyright (c) 2006-2008 Alex Holkner
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of pyglet nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
"""Display an image.
Usage::
display.py <filename>
A checkerboard background is visible behind any transparent areas of the
image.
"""
import sys
import pyglet
from pyglet.gl import *
window = pyglet.window.Window(visible=False, resizable=True)
@window.event
def on_draw():
background.blit_tiled(0, 0, 0, window.width, window.height)
img.blit(window.width // 2, window.height // 2, 0)
if len(sys.argv) != 2:
print(__doc__)
sys.exit(1)
filename = sys.argv[1]
img = pyglet.image.load(filename).get_texture(rectangle=True)
img.anchor_x = img.width // 2
img.anchor_y = img.height // 2
checks = pyglet.image.create(32, 32, pyglet.image.CheckerImagePattern())
background = pyglet.image.TileableTexture.create_for_image(checks)
# Enable alpha blending, required for image.blit.
glEnable(GL_BLEND)
glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA)
window.width = img.width
window.height = img.height
window.set_visible()
pyglet.app.run()
|
bitcraft/pyglet
|
examples/image_display.py
|
Python
|
bsd-3-clause
| 2,766
|
""" xgettext tool
Tool specific initialization of `xgettext` tool.
"""
# Copyright (c) 2001 - 2017 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
__revision__ = "src/engine/SCons/Tool/xgettext.py 74b2c53bc42290e911b334a6b44f187da698a668 2017/11/14 13:16:53 bdbaddog"
#############################################################################
class _CmdRunner(object):
""" Callable object, which runs shell command storing its stdout and stderr to
variables. It also provides `strfunction()` method, which shall be used by
scons Action objects to print command string. """
def __init__(self, command, commandstr=None):
self.out = None
self.err = None
self.status = None
self.command = command
self.commandstr = commandstr
def __call__(self, target, source, env):
import SCons.Action
import subprocess
import os
import sys
kw = {
'stdin': 'devnull',
'stdout': subprocess.PIPE,
'stderr': subprocess.PIPE,
'universal_newlines': True,
'shell': True
}
command = env.subst(self.command, target=target, source=source)
proc = SCons.Action._subproc(env, command, **kw)
self.out, self.err = proc.communicate()
self.status = proc.wait()
if self.err:
            sys.stderr.write(self.err)  # already text: universal_newlines=True
return self.status
def strfunction(self, target, source, env):
import os
comstr = self.commandstr
if env.subst(comstr, target=target, source=source) == "":
comstr = self.command
s = env.subst(comstr, target=target, source=source)
return s
#############################################################################
#############################################################################
def _update_pot_file(target, source, env):
""" Action function for `POTUpdate` builder """
import re
import os
import SCons.Action
nop = lambda target, source, env: 0
# Save scons cwd and os cwd (NOTE: they may be different. After the job, we
# revert each one to its original state).
save_cwd = env.fs.getcwd()
save_os_cwd = os.getcwd()
chdir = target[0].dir
chdir_str = repr(chdir.get_abspath())
    # Print the chdir message (employ SCons.Action.Action for that; it knows
    # better than we do how to do this correctly).
env.Execute(SCons.Action.Action(nop, "Entering " + chdir_str))
# Go to target's directory and do our job
env.fs.chdir(chdir, 1) # Go into target's directory
try:
cmd = _CmdRunner('$XGETTEXTCOM', '$XGETTEXTCOMSTR')
action = SCons.Action.Action(cmd, strfunction=cmd.strfunction)
status = action([target[0]], source, env)
except:
# Something went wrong.
env.Execute(SCons.Action.Action(nop, "Leaving " + chdir_str))
# Revert working dirs to previous state and re-throw exception.
env.fs.chdir(save_cwd, 0)
os.chdir(save_os_cwd)
raise
# Print chdir message.
env.Execute(SCons.Action.Action(nop, "Leaving " + chdir_str))
# Revert working dirs to previous state.
env.fs.chdir(save_cwd, 0)
os.chdir(save_os_cwd)
    # If the command was not successful, return the error code.
if status: return status
new_content = cmd.out
if not new_content:
# When xgettext finds no internationalized messages, no *.pot is created
# (because we don't want to bother translators with empty POT files).
needs_update = False
explain = "no internationalized messages encountered"
else:
if target[0].exists():
            # If the file already exists, it's left unaltered unless its messages
            # are outdated (w.r.t. those recovered by xgettext from the sources).
old_content = target[0].get_text_contents()
re_cdate = re.compile(r'^"POT-Creation-Date: .*"$[\r\n]?', re.M)
old_content_nocdate = re.sub(re_cdate, "", old_content)
new_content_nocdate = re.sub(re_cdate, "", new_content)
if (old_content_nocdate == new_content_nocdate):
# Messages are up-to-date
needs_update = False
explain = "messages in file found to be up-to-date"
else:
# Messages are outdated
needs_update = True
explain = "messages in file were outdated"
else:
# No POT file found, create new one
needs_update = True
explain = "new file"
if needs_update:
# Print message employing SCons.Action.Action for that.
msg = "Writing " + repr(str(target[0])) + " (" + explain + ")"
env.Execute(SCons.Action.Action(nop, msg))
f = open(str(target[0]), "w")
f.write(new_content)
f.close()
return 0
else:
# Print message employing SCons.Action.Action for that.
msg = "Not writing " + repr(str(target[0])) + " (" + explain + ")"
env.Execute(SCons.Action.Action(nop, msg))
return 0
#############################################################################
#############################################################################
from SCons.Builder import BuilderBase
#############################################################################
class _POTBuilder(BuilderBase):
def _execute(self, env, target, source, *args):
if not target:
if 'POTDOMAIN' in env and env['POTDOMAIN']:
domain = env['POTDOMAIN']
else:
domain = 'messages'
target = [domain]
return BuilderBase._execute(self, env, target, source, *args)
#############################################################################
#############################################################################
def _scan_xgettext_from_files(target, source, env, files=None, path=None):
""" Parses `POTFILES.in`-like file and returns list of extracted file names.
"""
import re
import SCons.Util
import SCons.Node.FS
if files is None:
return 0
if not SCons.Util.is_List(files):
files = [files]
if path is None:
if 'XGETTEXTPATH' in env:
path = env['XGETTEXTPATH']
else:
path = []
if not SCons.Util.is_List(path):
path = [path]
path = SCons.Util.flatten(path)
dirs = ()
for p in path:
if not isinstance(p, SCons.Node.FS.Base):
if SCons.Util.is_String(p):
p = env.subst(p, source=source, target=target)
p = env.arg2nodes(p, env.fs.Dir)
dirs += tuple(p)
# cwd is the default search path (when no path is defined by user)
if not dirs:
dirs = (env.fs.getcwd(),)
# Parse 'POTFILE.in' files.
re_comment = re.compile(r'^#[^\n\r]*$\r?\n?', re.M)
re_emptyln = re.compile(r'^[ \t\r]*$\r?\n?', re.M)
re_trailws = re.compile(r'[ \t\r]+$')
for f in files:
# Find files in search path $XGETTEXTPATH
if isinstance(f, SCons.Node.FS.Base) and f.rexists():
contents = f.get_text_contents()
contents = re_comment.sub("", contents)
contents = re_emptyln.sub("", contents)
contents = re_trailws.sub("", contents)
depnames = contents.splitlines()
for depname in depnames:
depfile = SCons.Node.FS.find_file(depname, dirs)
if not depfile:
depfile = env.arg2nodes(depname, dirs[0].File)
env.Depends(target, depfile)
return 0
#############################################################################
#############################################################################
def _pot_update_emitter(target, source, env):
""" Emitter function for `POTUpdate` builder """
from SCons.Tool.GettextCommon import _POTargetFactory
import SCons.Util
import SCons.Node.FS
if 'XGETTEXTFROM' in env:
xfrom = env['XGETTEXTFROM']
else:
return target, source
if not SCons.Util.is_List(xfrom):
xfrom = [xfrom]
xfrom = SCons.Util.flatten(xfrom)
# Prepare list of 'POTFILE.in' files.
files = []
for xf in xfrom:
if not isinstance(xf, SCons.Node.FS.Base):
if SCons.Util.is_String(xf):
# Interpolate variables in strings
xf = env.subst(xf, source=source, target=target)
xf = env.arg2nodes(xf)
files.extend(xf)
if files:
env.Depends(target, files)
_scan_xgettext_from_files(target, source, env, files)
return target, source
#############################################################################
#############################################################################
from SCons.Environment import _null
#############################################################################
def _POTUpdateBuilderWrapper(env, target=None, source=_null, **kw):
return env._POTUpdateBuilder(target, source, **kw)
#############################################################################
#############################################################################
def _POTUpdateBuilder(env, **kw):
""" Creates `POTUpdate` builder object """
import SCons.Action
from SCons.Tool.GettextCommon import _POTargetFactory
kw['action'] = SCons.Action.Action(_update_pot_file, None)
kw['suffix'] = '$POTSUFFIX'
kw['target_factory'] = _POTargetFactory(env, alias='$POTUPDATE_ALIAS').File
kw['emitter'] = _pot_update_emitter
return _POTBuilder(**kw)
#############################################################################
#############################################################################
def generate(env, **kw):
""" Generate `xgettext` tool """
import SCons.Util
from SCons.Tool.GettextCommon import RPaths, _detect_xgettext
try:
env['XGETTEXT'] = _detect_xgettext(env)
except:
env['XGETTEXT'] = 'xgettext'
# NOTE: sources="$SOURCES" would work as well. However, we use following
# construction to convert absolute paths provided by scons onto paths
# relative to current working dir. Note, that scons expands $SOURCE(S) to
# absolute paths for sources $SOURCE(s) outside of current subtree (e.g. in
# "../"). With source=$SOURCE these absolute paths would be written to the
# resultant *.pot file (and its derived *.po files) as references to lines in
# source code (e.g. referring lines in *.c files). Such references would be
# correct (e.g. in poedit) only on machine on which *.pot was generated and
# would be of no use on other hosts (having a copy of source code located
# in different place in filesystem).
sources = '$( ${_concat( "", SOURCES, "", __env__, XgettextRPaths, TARGET' \
+ ', SOURCES)} $)'
# NOTE: the output from $XGETTEXTCOM command must go to stdout, not to a file.
# This is required by the POTUpdate builder's action.
xgettextcom = '$XGETTEXT $XGETTEXTFLAGS $_XGETTEXTPATHFLAGS' \
+ ' $_XGETTEXTFROMFLAGS -o - ' + sources
xgettextpathflags = '$( ${_concat( XGETTEXTPATHPREFIX, XGETTEXTPATH' \
+ ', XGETTEXTPATHSUFFIX, __env__, RDirs, TARGET, SOURCES)} $)'
xgettextfromflags = '$( ${_concat( XGETTEXTFROMPREFIX, XGETTEXTFROM' \
+ ', XGETTEXTFROMSUFFIX, __env__, target=TARGET, source=SOURCES)} $)'
env.SetDefault(
_XGETTEXTDOMAIN='${TARGET.filebase}',
XGETTEXTFLAGS=[],
XGETTEXTCOM=xgettextcom,
XGETTEXTCOMSTR='',
XGETTEXTPATH=[],
XGETTEXTPATHPREFIX='-D',
XGETTEXTPATHSUFFIX='',
XGETTEXTFROM=None,
XGETTEXTFROMPREFIX='-f',
XGETTEXTFROMSUFFIX='',
_XGETTEXTPATHFLAGS=xgettextpathflags,
_XGETTEXTFROMFLAGS=xgettextfromflags,
POTSUFFIX=['.pot'],
POTUPDATE_ALIAS='pot-update',
XgettextRPaths=RPaths(env)
)
env.Append(BUILDERS={
'_POTUpdateBuilder': _POTUpdateBuilder(env)
})
env.AddMethod(_POTUpdateBuilderWrapper, 'POTUpdate')
env.AlwaysBuild(env.Alias('$POTUPDATE_ALIAS'))
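# A minimal usage sketch (illustrative only; the file names are assumptions).
# In a SConstruct, the tool set up above would typically be used as:
#
#   env = Environment(tools=['default', 'xgettext'])
#   env.POTUpdate(['messages'], ['src/main.c', 'src/util.c'])
#
# which produces 'messages.pot' and hooks it to the 'pot-update' alias, so
# running `scons pot-update` refreshes the template.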
#############################################################################
#############################################################################
def exists(env):
""" Check, whether the tool exists """
from SCons.Tool.GettextCommon import _xgettext_exists
try:
return _xgettext_exists(env)
except:
return False
#############################################################################
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
mapycz/mapnik
|
scons/scons-local-3.0.1/SCons/Tool/xgettext.py
|
Python
|
lgpl-2.1
| 13,879
|
from django.db import models
from gitireadme.utils import getUploadToPath
import datetime
class Article(models.Model):
name = models.CharField(max_length=255,blank=True,null=True)
path = models.CharField(max_length=255,blank=True,null=True)
class ArticleAlias(models.Model):
repo = models.CharField(max_length=255,blank=True,null=True)
article = models.ForeignKey(Article)
|
gitireadme/gitireadme.server
|
gitireadme/article/models.py
|
Python
|
apache-2.0
| 391
|
from __future__ import absolute_import
import json
import subprocess
import conveyor
def run_command(command):
return subprocess.Popen(command, shell=True, stderr=subprocess.STDOUT, stdout=subprocess.PIPE).communicate()[0].strip()
def get_json_from_command(command):
return json.loads(run_command(command))
def test_create_and_get_application():
run_command('./bin/hoist --groups=group1 application create test 1')
app = get_json_from_command('./bin/hoist application get test')
assert app['groups'] == ['group1']
assert app['version'] == '1'
def test_list_applications():
assert get_json_from_command('./bin/hoist application list') == ['test']
def test_delete_applications():
run_command('./bin/hoist application delete test')
|
mconigliaro/conveyor
|
conveyor/tests/hoist_tests.py
|
Python
|
gpl-3.0
| 772
|
from __future__ import unicode_literals, division, absolute_import
from builtins import * # noqa pylint: disable=unused-import, redefined-builtin
import logging
import re
from flexget import plugin
from flexget.event import event
log = logging.getLogger('manipulate')
class Manipulate(object):
"""
Usage:
manipulate:
- <destination field>:
[phase]: <phase>
[from]: <source field>
[extract]: <regexp>
[separator]: <text>
[replace]:
regexp: <regexp>
format: <regexp>
[remove]: <boolean>
Example:
manipulate:
- title:
extract: \[\d\d\d\d\](.*)
"""
schema = {
'type': 'array',
'items': {
'type': 'object',
'additionalProperties': {
'type': 'object',
'properties': {
'phase': {'enum': ['metainfo', 'filter', 'modify']},
'from': {'type': 'string'},
'extract': {'type': 'string', 'format': 'regex'},
'separator': {'type': 'string'},
'remove': {'type': 'boolean'},
'replace': {
'type': 'object',
'properties': {
'regexp': {'type': 'string', 'format': 'regex'},
'format': {'type': 'string'},
},
'required': ['regexp', 'format'],
'additionalProperties': False,
},
},
'additionalProperties': False,
},
},
}
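    # A hedged config sketch (the field names are made up): copying a
    # cleaned-up value from `title` into a new `series_name` field during the
    # filter phase could look like:
    #
    #   manipulate:
    #     - series_name:
    #         phase: filter
    #         from: title
    #         replace:
    #           regexp: '[_.]'
    #           format: ' '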
def on_task_start(self, task, config):
"""
Separates the config into a dict with a list of jobs per phase.
Allows us to skip phases without any jobs in them.
"""
self.phase_jobs = {'filter': [], 'metainfo': [], 'modify': []}
for item in config:
for item_config in item.values():
# Get the phase specified for this item, or use default of metainfo
phase = item_config.get('phase', 'metainfo')
self.phase_jobs[phase].append(item)
@plugin.priority(plugin.PRIORITY_FIRST)
def on_task_metainfo(self, task, config):
if not self.phase_jobs['metainfo']:
# return if no jobs for this phase
return
modified = sum(self.process(entry, self.phase_jobs['metainfo']) for entry in task.entries)
log.verbose('Modified %d entries.' % modified)
@plugin.priority(plugin.PRIORITY_FIRST)
def on_task_filter(self, task, config):
if not self.phase_jobs['filter']:
# return if no jobs for this phase
return
modified = sum(
self.process(entry, self.phase_jobs['filter'])
for entry in task.entries + task.rejected
)
log.verbose('Modified %d entries.' % modified)
@plugin.priority(plugin.PRIORITY_FIRST)
def on_task_modify(self, task, config):
if not self.phase_jobs['modify']:
# return if no jobs for this phase
return
modified = sum(
self.process(entry, self.phase_jobs['modify'])
for entry in task.entries + task.rejected
)
log.verbose('Modified %d entries.' % modified)
def process(self, entry, jobs):
"""Process given jobs from config for an entry.
:param entry: Entry to modify
:param jobs: Config items to run on this entry
:return: True if any fields were modified
"""
modified = False
for item in jobs:
for field, config in item.items():
from_field = field
if 'from' in config:
from_field = config['from']
field_value = entry.get(from_field)
log.debug(
'field: `%s` from_field: `%s` field_value: `%s`'
% (field, from_field, field_value)
)
if config.get('remove'):
if field in entry:
del entry[field]
modified = True
continue
if 'extract' in config:
if not field_value:
log.warning('Cannot extract, field `%s` is not present' % from_field)
continue
match = re.search(config['extract'], field_value, re.I | re.U)
if match:
groups = [x for x in match.groups() if x is not None]
log.debug('groups: %s' % groups)
field_value = config.get('separator', ' ').join(groups).strip()
log.debug('field `%s` after extract: `%s`' % (field, field_value))
if 'replace' in config:
if not field_value:
log.warning('Cannot replace, field `%s` is not present' % from_field)
continue
replace_config = config['replace']
regexp = re.compile(replace_config['regexp'], flags=re.I | re.U)
field_value = regexp.sub(replace_config['format'], field_value).strip()
log.debug('field `%s` after replace: `%s`' % (field, field_value))
if from_field != field or entry[field] != field_value:
log.verbose('Field `%s` is now `%s`' % (field, field_value))
modified = True
entry[field] = field_value
return modified
@event('plugin.register')
def register_plugin():
plugin.register(Manipulate, 'manipulate', api_ver=2)
|
gazpachoking/Flexget
|
flexget/plugins/modify/manipulate.py
|
Python
|
mit
| 5,810
|
# -*- coding: utf-8 -*-
#
# satcfe/__init__.py
#
# Copyright 2015 Base4 Sistemas Ltda ME
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
__version__ = '0.0.9'
logging.getLogger('satcfe').addHandler(logging.NullHandler())
from satcomum.constantes import VERSAO_ER
from .base import BibliotecaSAT
from .clientelocal import ClienteSATLocal
from .clientesathub import ClienteSATHub
|
kmee/satcfe
|
satcfe/__init__.py
|
Python
|
apache-2.0
| 897
|
#!/usr/bin/env python2
# -*- coding: utf8 -*-
"""
get_dir_size.py: Various ways to calculate the size of a directory tree.
"""
#==============================================================================
# Various ways to calculate the size of a directory tree or a single file.
# Include methods to convert a size in bytes to the best standard IEC binary
# prefix to improve readability.
#
# The IEC (International Electrotechnical Commission) binary prefixes for
# quantities of digital information are these:
#
# IEC binary prefixes SI decimal prefixes Equivalences
# ======================= ======================= ========================
# Prefix Symbol bytes Prefix Symbol bytes IEC prefix SI prefix
# ======== ====== ===== ========= ====== ===== ============ ===========
#
# kibibyte KiB 2¹⁰ kilobyte kB 10³ 1 KiB 1.024 kB
# mebibyte MiB 2²⁰ megabyte MB 10⁶ 1 MiB 1.049 MB
# gibibyte GiB 2³⁰ gigabyte GB 10⁹ 1 GiB 1.074 GB
# tebibyte TiB 2⁴⁰ terabyte TB 10¹² 1 TiB 1.100 TB
# pebibyte PiB 2⁵⁰ petabyte PB 10¹⁵ 1 PiB 1.126 PB
# exbibyte EiB 2⁶⁰ exabyte EB 10¹⁸ 1 EiB 1.153 EB
# zebibyte ZiB 2⁷⁰ zettabyte ZB 10²¹ 1 ZiB 1.181 ZB
# yobibyte YiB 2⁸⁰ yottabyte YB 10²⁴ 1 YiB 1.209 YB
#
#==============================================================================
#==============================================================================
# Copyright 2010 joe di castro <joe@joedicastro.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#==============================================================================
__author__ = "joe di castro <joe@joedicastro.com>"
__license__ = "GNU General Public License version 3"
__date__ = "30/12/2010"
__version__ = "0.1"
try:
import sys
import os
import time
from subprocess import Popen, PIPE
except ImportError:
# Checks the installation of the necessary python modules
print((os.linesep * 2).join(["An error found importing one module:",
str(sys.exc_info()[1]), "You need to install it", "Stopping..."]))
sys.exit(-2)
# Not mine, seen out there. Approximate size, not accurate. Doesn't go into
# hidden files and dirs, doesn't take into account the ".." special entries
# or symbolic links, and doesn't work with a single file.
def get_dir_size(the_path):
"""Get size of a directory tree in bytes."""
path_size = 0
for path, dirs, files in os.walk(the_path):
for fil in files:
filename = os.path.join(path, fil)
path_size += os.path.getsize(filename)
return path_size
# My version, accurate. Same results as the *NIX command "du -bs". Takes
# symbolic links into consideration without following them. Works with single
# files.
def get_size(the_path):
"""Get size of a directory tree or a file in bytes."""
path_size = 0
for path, directories, files in os.walk(the_path):
for filename in files:
path_size += os.lstat(os.path.join(path, filename)).st_size
for directory in directories:
path_size += os.lstat(os.path.join(path, directory)).st_size
path_size += os.path.getsize(the_path)
return path_size
# Less pythonic, but faster and still accurate. Takes symbolic links into
# consideration without following them.
def get_size_fast(the_path):
"""Get size of a directory tree or a file in bytes."""
def get_sizes(the_path):
"""Make a generator of individual file & directory sizes."""
if not os.path.islink(the_path):
if os.path.isdir(the_path):
for file_or_dir in os.listdir(the_path):
path = os.path.join(the_path, file_or_dir)
if os.path.isfile(path):
yield os.lstat(path).st_size
else:
for size in get_sizes(path):
yield size
yield os.lstat(the_path).st_size
else:
yield os.lstat(the_path).st_size
return sum(get_sizes(the_path))
# This converts a size in bytes to the best unit, using IEC binary prefixes.
def best_unit_size(bytes_size):
"""Get a size in bytes & convert it to the best IEC prefix for readability.
Return a dictionary with three pair of keys/values:
"s" -- (float) Size of path converted to the best unit for easy read
"u" -- (str) The prefix (IEC) for s (from bytes(2^0) to YiB(2^80))
"b" -- (int / long) The original size in bytes
"""
for exp in range(0, 90, 10):
bu_size = abs(bytes_size) / pow(2.0, exp)
if int(bu_size) < 2 ** 10:
unit = {0: "bytes", 10: "KiB", 20: "MiB", 30: "GiB", 40: "TiB",
50: "PiB", 60: "EiB", 70: "ZiB", 80: "YiB"}[exp]
break
return {"s": bu_size, "u": unit, "b": bytes_size}
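# For instance (illustrative only): best_unit_size(1536) returns
# {"s": 1.5, "u": "KiB", "b": 1536}, since 1536 / 2.0**10 == 1.5 and
# int(1.5) < 2**10 stops the loop at exp == 10.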
# Combination of calculating the size in bytes and conversion to best IEC
# prefix in one function.
def get_unit_size(the_path):
"""Calculate size of a directory/file & convert it for the best IEC prefix.
Return a dictionary with three pair of keys/values:
"s" -- (float) Size of path converted to the best unit for easy read
"u" -- (str) The prefix (IEC) for s (from bytes(2^0) to YiB(2^80))
"b" -- (int / long) The original size in bytes
"""
bytes_size = 0
for path, directories, files in os.walk(the_path):
for filename in files:
bytes_size += os.lstat(os.path.join(path, filename)).st_size
for directory in directories:
bytes_size += os.lstat(os.path.join(path, directory)).st_size
bytes_size += os.path.getsize(the_path)
for exp in range(0, 90, 10):
bu_size = abs(bytes_size) / pow(2.0, exp)
if int(bu_size) < 2 ** 10:
unit = {0: "bytes", 10: "KiB", 20: "MiB", 30: "GiB", 40: "TiB",
50: "PiB", 60: "EiB", 70: "ZiB", 80: "YiB"}[exp]
break
return {"s": bu_size, "u": unit, "b": bytes_size}
class GetSize:
"""Create a GetSize object that converts size(bytes) to the best IEC prefix
The size of this object can be obtained from a path (directory or file) or
from a size in bytes.
"""
def __init__(self):
"""Create the object GetSize itself and set various attributes.
These attributes are about the size of a file or directory tree:
bytes = The size in bytes
size = The size in the best IEC unit prefix for readability
unit = The IEC prefix of size
"""
self.bytes = 0
self.size = 0
self.unit = "bytes"
def from_bytes(self, sz_bytes):
"""Get size & IEC prefix from size in bytes."""
self.bytes = sz_bytes
for exp in range(0, 90, 10):
self.size = abs(self.bytes) / pow(2.0, exp)
if int(self.size) < 2 ** 10:
self.unit = {0: "bytes", 10: "KiB", 20: "MiB", 30: "GiB",
40: "TiB", 50: "PiB", 60: "EiB", 70: "ZiB",
80: "YiB"}[exp]
break
def from_path(self, a_path):
"""Get size & IEC prefix from a directory or file."""
for path, directories, files in os.walk(a_path):
for filename in files:
self.bytes += os.lstat(os.path.join(path, filename)).st_size
for directory in directories:
self.bytes += os.lstat(os.path.join(path, directory)).st_size
self.bytes += os.path.getsize(a_path)
self.from_bytes(self.bytes)
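# A usage sketch for the class above (the path is an assumption):
#
#   gs = GetSize()
#   gs.from_path("/var/log")
#   print("{0:.1f} {1} ({2} bytes)".format(gs.size, gs.unit, gs.bytes))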
def main():
"""Main section"""
my_path = ".."
functions = [get_dir_size, get_size, get_size_fast]
def timeit(ftn, *args):
"""Get time consumed by a function."""
time_start = time.time()
sz_du = ftn(args[0])
time_end = time.time()
return sz_du, time_end - time_start
# Show results from standard *NIX command 'du'
print(" Space in bytes 'du' Diff Time")
print(" ===== ======== ========= ====")
print("$ du -bs".center(50) + os.linesep + ("-" * 8).center(50))
tm_du_start = time.time()
bytes_du = int(Popen(["du", "-bs", my_path], stdout=PIPE).stdout.
readlines()[0].split()[0])
tm_du = time.time() - tm_du_start
sz_du = best_unit_size(bytes_du)
print("{0:.1f} {1} {2:12} n/a {3:12.4f}s".
format(sz_du["s"], sz_du["u"], sz_du["b"], tm_du) + os.linesep)
# Show results of the distinct Python methods to compare speed & precision
for fnct in functions:
fname = fnct.__name__
print(fname.center(50) + os.linesep + ("-" * len(fname)).center(50))
bytes_fn, time_fn = timeit(fnct, my_path)
sz_fn = best_unit_size(bytes_fn)
diff_fn = best_unit_size(bytes_du - sz_fn["b"])
print("{0:.1f} {1} {2:12} {3:10.2f} {4:5}{5:8.4f}s".
format(sz_fn["s"], sz_fn["u"], sz_fn["b"], diff_fn["s"],
diff_fn["u"], time_fn) + os.linesep)
# get_unit_size as a combination of two functions requires separate code
print("get_unit_size".center(50) + os.linesep + ("-" * 13).center(50))
sz_gus, tm_gus = timeit(get_unit_size, my_path)
diff_gus = best_unit_size(bytes_du - sz_gus["b"])
print("{0:.1f} {1} {2:12} {3:10.2f} {4:5}{5:8.4f}s".
format(sz_gus["s"], sz_gus["u"], sz_gus["b"], diff_gus["s"],
diff_gus["u"], tm_gus) + os.linesep)
# shows results from GetSize class
print("GetSize Class".center(50) + os.linesep + ("-" * 13).center(50))
time_class_start = time.time()
sz_class = GetSize()
sz_class.from_path(my_path)
tm_class = time.time() - time_class_start
diff_class = GetSize()
diff_class.from_bytes(bytes_du - sz_class.bytes)
print("{0:.1f} {1} {2:12} {3:10.2f} {4:5}{5:8.4f}s".
format(sz_class.size, sz_class.unit, sz_class.bytes, diff_class.size,
diff_class.unit, tm_class) + os.linesep)
if __name__ == "__main__":
main()
|
joedicastro/python-recipes
|
get_size.py
|
Python
|
gpl-3.0
| 10,830
|
import glob, os, sys
from xml.sax import make_parser
from db_classes import *
from db_output import *
from MaKaC.common.Configuration import Config
from MaKaC.common import Configuration
import MaKaC.debug as debug
debug.debugW = False
bk = BKrepository("/data/indico/backup.agenda")
mh = MonHandler(bk)
db = Zdb()
db.open()
saxparser = make_parser()
saxparser.setContentHandler(mh)
for obj in \
"info", \
"holder", \
"user", \
"admin", \
"category", \
"group", \
"conf",\
:
listdir = map(lambda x: os.path.basename(x),
glob.glob("%s/%s*.xml"%(bk.backupArea,obj)))
listdir.sort()
for file in listdir:
if file.find(obj) != 0 or \
file.find('.xml') < 0: continue
mh.currentXMLfile = file
print >> sys.stderr, "%s"%file
saxparser.parse("%s/%s"%(bk.backupArea,file))
db.commit()
os.system("chown -R daemon %s/" %
Configuration.Config.getInstance().getArchivePath())
|
belokop-an/agenda-tools
|
tools/backup/db_import.py
|
Python
|
gpl-2.0
| 1,101
|
import json
import logging
import gzip
import socket
import ssl
import sys
import traceback
from datetime import datetime
from threading import Lock, Thread
from time import sleep
from typing import Optional
import websocket
from vnpy.trader.utility import get_file_logger
class WebsocketClient:
"""
Websocket API
After creating the client object, use start() to run worker and ping threads.
    The worker thread connects the websocket automatically.
    Use stop() to stop the threads and disconnect the websocket before
    destroying the client object (especially when exiting the program).
Default serialization format is json.
    Callbacks to override:
* unpack_data
* on_connected
* on_disconnected
* on_packet
* on_error
After start() is called, the ping thread will ping server every 60 seconds.
If you want to send anything other than JSON, override send_packet.
"""
def __init__(self):
"""Constructor"""
self.host = None
self._ws_lock = Lock()
self._ws = None
self._worker_thread = None
self._ping_thread = None
self._active = False
self.proxy_host = None
self.proxy_port = None
self.ping_interval = 60 # seconds
self.header = {}
self.logger: Optional[logging.Logger] = None
# For debugging
self._last_sent_text = None
self._last_received_text = None
def init(self,
host: str,
proxy_host: str = "",
proxy_port: int = 0,
ping_interval: int = 60,
header: dict = None,
log_path: Optional[str] = None,
):
"""
:param host:
:param proxy_host:
:param proxy_port:
:param header:
:param ping_interval: unit: seconds, type: int
:param log_path: optional. file to save log.
"""
self.host = host
self.ping_interval = ping_interval # seconds
if log_path is not None:
self.logger = get_file_logger(log_path)
self.logger.setLevel(logging.DEBUG)
if header:
self.header = header
if proxy_host and proxy_port:
self.proxy_host = proxy_host
self.proxy_port = proxy_port
def start(self):
"""
        Start the client; the on_connected callback is called after the
        websocket is connected successfully.
        Please don't send any packet until the on_connected callback is called.
"""
self._active = True
self._worker_thread = Thread(target=self._run)
self._worker_thread.start()
self._ping_thread = Thread(target=self._run_ping)
self._ping_thread.start()
def stop(self):
"""
Stop the client.
"""
self._active = False
self._disconnect()
def join(self):
"""
Wait till all threads finish.
This function cannot be called from worker thread or callback function.
"""
self._ping_thread.join()
self._worker_thread.join()
def send_packet(self, packet: dict):
"""
        Send a packet (dict data) to the server.
        Override this if you want to send a non-JSON packet.
"""
text = json.dumps(packet)
self._record_last_sent_text(text)
return self._send_text(text)
def _log(self, msg, *args):
logger = self.logger
if logger:
logger.debug(msg, *args)
def _send_text(self, text: str):
"""
Send a text string to server.
"""
ws = self._ws
if ws:
ws.send(text, opcode=websocket.ABNF.OPCODE_TEXT)
self._log('sent text: %s', text)
def _send_binary(self, data: bytes):
"""
Send bytes data to server.
"""
ws = self._ws
if ws:
ws._send_binary(data)
self._log('sent binary: %s', data)
def _create_connection(self, *args, **kwargs):
""""""
return websocket.create_connection(*args, **kwargs)
def _ensure_connection(self):
""""""
triggered = False
with self._ws_lock:
if self._ws is None:
self._ws = self._create_connection(
self.host,
sslopt={"cert_reqs": ssl.CERT_NONE},
http_proxy_host=self.proxy_host,
http_proxy_port=self.proxy_port,
header=self.header
)
triggered = True
if triggered:
self.on_connected()
def _disconnect(self):
"""
"""
triggered = False
with self._ws_lock:
if self._ws:
ws: websocket.WebSocket = self._ws
self._ws = None
triggered = True
if triggered:
ws.close()
self.on_disconnected()
def _run(self):
"""
Keep running till stop is called.
"""
try:
while self._active:
try:
self._ensure_connection()
ws = self._ws
if ws:
recv_data = ws.recv()
                        # ws object was closed while the recv call was blocking
if not recv_data:
self._disconnect()
continue
self._record_last_received_text(recv_data)
try:
if isinstance(recv_data, bytes):
recv_data = gzip.decompress(recv_data)
data = self.unpack_data(recv_data)
except ValueError as e:
print("websocket unable to parse data: " + recv_data, file=sys.stderr)
raise e
self._log('recv data: %s', data)
self.on_packet(data)
# ws is closed before recv function is called
# For socket.error, see Issue #1608
except (
websocket.WebSocketConnectionClosedException,
websocket.WebSocketBadStatusException,
socket.error
):
self._disconnect()
# other internal exception raised in on_packet
except: # noqa
et, ev, tb = sys.exc_info()
self.on_error(et, ev, tb)
self._disconnect()
except: # noqa
et, ev, tb = sys.exc_info()
self.on_error(et, ev, tb)
self._disconnect()
@staticmethod
def unpack_data(data: str):
"""
Default serialization format is json.
        Override this method if you want to use another serialization format.
"""
return json.loads(data)
def _run_ping(self):
""""""
while self._active:
try:
self._ping()
except: # noqa
et, ev, tb = sys.exc_info()
self.on_error(et, ev, tb)
# self._run() will reconnect websocket
sleep(1)
for i in range(self.ping_interval):
if not self._active:
break
sleep(1)
def _ping(self):
""""""
ws = self._ws
if ws:
ws.send("ping", websocket.ABNF.OPCODE_PING)
@staticmethod
def on_connected():
"""
Callback when websocket is connected successfully.
"""
pass
@staticmethod
def on_disconnected():
"""
Callback when websocket connection is lost.
"""
pass
@staticmethod
def on_packet(packet: dict):
"""
Callback when receiving data from server.
"""
pass
def on_error(self, exception_type: type, exception_value: Exception, tb):
"""
Callback when exception raised.
"""
sys.stderr.write(
self.exception_detail(exception_type, exception_value, tb)
)
return sys.excepthook(exception_type, exception_value, tb)
def exception_detail(
self, exception_type: type, exception_value: Exception, tb
):
"""
Print detailed exception information.
"""
text = "[{}]: Unhandled WebSocket Error:{}\n".format(
datetime.now().isoformat(), exception_type
)
text += "LastSentText:\n{}\n".format(self._last_sent_text)
text += "LastReceivedText:\n{}\n".format(self._last_received_text)
text += "Exception trace: \n"
text += "".join(
traceback.format_exception(exception_type, exception_value, tb)
)
return text
def _record_last_sent_text(self, text: str):
"""
Record last sent text for debug purpose.
"""
self._last_sent_text = text[:1000]
def _record_last_received_text(self, text: str):
"""
Record last received text for debug purpose.
"""
self._last_received_text = text[:1000]
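# A minimal usage sketch (illustrative only; the URL and payload are
# assumptions, not part of this module):
#
#   class EchoClient(WebsocketClient):
#       def on_connected(self):
#           self.send_packet({"op": "subscribe", "topic": "ticker"})
#
#       def on_packet(self, packet):
#           print(packet)
#
#   client = EchoClient()
#   client.init("wss://example.com/ws", ping_interval=60)
#   client.start()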
|
msincenselee/vnpy
|
vnpy/api/websocket/websocket_client.py
|
Python
|
mit
| 9,241
|
#!/usr/bin/env python2.7
import numpy as np
import random
import time
import itertools
# Implementation of Linear UCB
class LinUCB:
# all_articles = []
all_M = None
all_M_inv = None
all_b = None
all_w = None
mapping = {}
keyList=None
firstTS = None
articleSize = 1
totalLine = 0
alpha = 0.2
current_article = None # current recommendation
current_user = None # user for which the article was recommended
def set_articles(self, articles):
print 'We are using an alpha of: %f \n' %(self.alpha)
print 'VECTORIZED VERSION!!!!!\n'
self.keyList = np.array(articles.keys())
#self.valueList = np.array(articles.values())
self.articleSize = self.keyList.size
self.all_w = np.zeros((6,self.articleSize))
M = np.identity(6)
self.all_M = np.tile(M,(1,self.articleSize))
self.all_M_inv = np.tile(M,(1,self.articleSize))
self.all_b = np.zeros((6,self.articleSize))
self.firstTS = np.zeros((1,self.articleSize))
for idx, article in enumerate(articles.keys()):
self.mapping[article] = idx
def recommend(self, timestamp, user_features, articles):
user_features = np.reshape(user_features, (6, 1))
indicesOfArticles = [self.mapping[article] for article in articles]
#exploit part
w_x = self.all_w[:,indicesOfArticles]
exploitPart=np.dot(w_x.T,user_features)
#explorePart
indicesForM_inv = [range(index*6,index*6+6) for index in indicesOfArticles]
indicesForM_inv = list(itertools.chain(*indicesForM_inv))
allM_inv = self.all_M_inv[:,indicesForM_inv]
explorePart = np.dot((np.dot(user_features.T,allM_inv)).reshape(len(articles),6),user_features)
explorePart = self.alpha*np.power(explorePart,0.5)
#time part
self.firstTS[self.firstTS[:,indicesOfArticles] == 0] = timestamp-1.001 #update all which are new
timepart = [timestamp]*len(articles) - self.firstTS[:,indicesOfArticles]
timepart = 1/np.log(timepart)
UCB = (exploitPart + explorePart + timepart.T).flatten()
#print UCB
#bestArticlesIndices = UCB==max(UCB)
#articlesArray = np.array(articles)
bestArticle=articles[np.argmax(UCB)]
#if sum(bestArticlesIndices) == 1:
# bestArticle = articlesArray[bestArticlesIndices][0]
#else:
# bestArticle = random.choice(articlesArray[bestArticlesIndices])
self.current_user = user_features
self.current_article = bestArticle
return bestArticle
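    # For reference (standard LinUCB, not specific to this file): the score
    # computed above for an article a with user-feature vector x is
    #     UCB_a = w_a^T x + alpha * sqrt(x^T M_a^{-1} x) + 1 / log(t - t_first(a))
    # i.e. an exploit term, a confidence (exploration) bonus, and this
    # implementation's extra recency bonus based on the first timestamp.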
def update(self, reward):
if reward == 0 or reward == 1:
self.totalLine += 1
#start = time.time()
article = self.current_article
user = self.current_user
indexForArticle = np.zeros((self.articleSize),dtype=bool)
indexForArticle[self.mapping[article]] = True
M = self.all_M[:,indexForArticle.repeat(6)]
b = self.all_b[:,indexForArticle]
self.all_M[:,indexForArticle.repeat(6)] = M + np.dot(user, user.T)
self.all_b[:,indexForArticle] = b + reward * user
# precompute M^-1 and w for UCB
self.all_M_inv[:,indexForArticle.repeat(6)] = np.linalg.inv(self.all_M[:,indexForArticle.repeat(6)])
self.all_w[:,indexForArticle] = np.dot(self.all_M_inv[:,indexForArticle.repeat(6)], self.all_b[:,indexForArticle])
#end = time.time()
#print end - start
linucb = LinUCB()
# Evaluator will call this function and pass the article features.
# Check evaluator.py description for details.
def set_articles(art):
linucb.set_articles(art)
# This function will be called by the evaluator.
# Check task description for details.
def update(reward):
linucb.update(reward)
# This function will be called by the evaluator.
# Check task description for details.
def reccomend(timestamp, user_features, articles):
return linucb.recommend(timestamp, user_features, articles)
|
lukaselmer/ethz-data-mining
|
4-bandit/code/policyLinUCBVectorizedTimestamp.py
|
Python
|
mit
| 4,092
|
# This module contains HipChat related functions
def get_message_color(return_code):
    if return_code != 0:
return "red"
else:
return "green"
def pack_json_response(message, color="green"):
    message = message.replace('"', '&quot;')  # some basic escaping
response = '{ "color": "'+color+'", \
"message": "'+message+'", \
"notify": false, \
"message_format": "html" }'
return response
def read_json_request(json_request):
content = json_request['item']['message']['message']
items = content.split(' ')
return items
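# An illustrative round trip (the message text is an assumption):
#
#   payload = pack_json_response("build failed", get_message_color(1))
#
# yields a red HipChat notification payload such as
#   { "color": "red", "message": "build failed", "notify": false,
#     "message_format": "html" }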
|
Wide-Net/widebot
|
hiputils.py
|
Python
|
mit
| 609
|
from __future__ import unicode_literals
from django.apps.registry import Apps
from django.db import models
from django.db.models import signals
from django.dispatch import receiver
from django.test import TestCase, mock
from django.test.utils import isolate_apps
from django.utils import six
from .models import Author, Book, Car, Person
class BaseSignalTest(TestCase):
def setUp(self):
# Save up the number of connected signals so that we can check at the
# end that all the signals we register get properly unregistered (#9989)
self.pre_signals = (
len(signals.pre_save.receivers),
len(signals.post_save.receivers),
len(signals.pre_delete.receivers),
len(signals.post_delete.receivers),
)
def tearDown(self):
# Check that all our signals got disconnected properly.
post_signals = (
len(signals.pre_save.receivers),
len(signals.post_save.receivers),
len(signals.pre_delete.receivers),
len(signals.post_delete.receivers),
)
self.assertEqual(self.pre_signals, post_signals)
class SignalTests(BaseSignalTest):
def test_model_pre_init_and_post_init(self):
data = []
def pre_init_callback(sender, args, **kwargs):
data.append(kwargs['kwargs'])
signals.pre_init.connect(pre_init_callback)
def post_init_callback(sender, instance, **kwargs):
data.append(instance)
signals.post_init.connect(post_init_callback)
p1 = Person(first_name="John", last_name="Doe")
self.assertEqual(data, [{}, p1])
def test_save_signals(self):
data = []
def pre_save_handler(signal, sender, instance, **kwargs):
data.append(
(instance, kwargs.get("raw", False))
)
def post_save_handler(signal, sender, instance, **kwargs):
data.append(
(instance, kwargs.get("created"), kwargs.get("raw", False))
)
signals.pre_save.connect(pre_save_handler, weak=False)
signals.post_save.connect(post_save_handler, weak=False)
try:
p1 = Person.objects.create(first_name="John", last_name="Smith")
self.assertEqual(data, [
(p1, False),
(p1, True, False),
])
data[:] = []
p1.first_name = "Tom"
p1.save()
self.assertEqual(data, [
(p1, False),
(p1, False, False),
])
data[:] = []
# Calling an internal method purely so that we can trigger a "raw" save.
p1.save_base(raw=True)
self.assertEqual(data, [
(p1, True),
(p1, False, True),
])
data[:] = []
p2 = Person(first_name="James", last_name="Jones")
p2.id = 99999
p2.save()
self.assertEqual(data, [
(p2, False),
(p2, True, False),
])
data[:] = []
p2.id = 99998
p2.save()
self.assertEqual(data, [
(p2, False),
(p2, True, False),
])
finally:
signals.pre_save.disconnect(pre_save_handler)
signals.post_save.disconnect(post_save_handler)
def test_delete_signals(self):
data = []
def pre_delete_handler(signal, sender, instance, **kwargs):
data.append(
(instance, instance.id is None)
)
# #8285: signals can be any callable
class PostDeleteHandler(object):
def __init__(self, data):
self.data = data
def __call__(self, signal, sender, instance, **kwargs):
self.data.append(
(instance, instance.id is None)
)
post_delete_handler = PostDeleteHandler(data)
signals.pre_delete.connect(pre_delete_handler, weak=False)
signals.post_delete.connect(post_delete_handler, weak=False)
try:
p1 = Person.objects.create(first_name="John", last_name="Smith")
p1.delete()
self.assertEqual(data, [
(p1, False),
(p1, False),
])
data[:] = []
p2 = Person(first_name="James", last_name="Jones")
p2.id = 99999
p2.save()
p2.id = 99998
p2.save()
p2.delete()
self.assertEqual(data, [
(p2, False),
(p2, False)
])
data[:] = []
self.assertQuerysetEqual(
Person.objects.all(), [
"James Jones",
],
six.text_type
)
finally:
signals.pre_delete.disconnect(pre_delete_handler)
signals.post_delete.disconnect(post_delete_handler)
def test_decorators(self):
data = []
@receiver(signals.pre_save, weak=False)
def decorated_handler(signal, sender, instance, **kwargs):
data.append(instance)
@receiver(signals.pre_save, sender=Car, weak=False)
def decorated_handler_with_sender_arg(signal, sender, instance, **kwargs):
data.append(instance)
try:
c1 = Car.objects.create(make="Volkswagen", model="Passat")
self.assertEqual(data, [c1, c1])
finally:
signals.pre_save.disconnect(decorated_handler)
signals.pre_save.disconnect(decorated_handler_with_sender_arg, sender=Car)
def test_save_and_delete_signals_with_m2m(self):
data = []
def pre_save_handler(signal, sender, instance, **kwargs):
data.append('pre_save signal, %s' % instance)
if kwargs.get('raw'):
data.append('Is raw')
def post_save_handler(signal, sender, instance, **kwargs):
data.append('post_save signal, %s' % instance)
if 'created' in kwargs:
if kwargs['created']:
data.append('Is created')
else:
data.append('Is updated')
if kwargs.get('raw'):
data.append('Is raw')
def pre_delete_handler(signal, sender, instance, **kwargs):
data.append('pre_delete signal, %s' % instance)
data.append('instance.id is not None: %s' % (instance.id is not None))
def post_delete_handler(signal, sender, instance, **kwargs):
data.append('post_delete signal, %s' % instance)
data.append('instance.id is not None: %s' % (instance.id is not None))
signals.pre_save.connect(pre_save_handler, weak=False)
signals.post_save.connect(post_save_handler, weak=False)
signals.pre_delete.connect(pre_delete_handler, weak=False)
signals.post_delete.connect(post_delete_handler, weak=False)
try:
a1 = Author.objects.create(name='Neal Stephenson')
self.assertEqual(data, [
"pre_save signal, Neal Stephenson",
"post_save signal, Neal Stephenson",
"Is created"
])
data[:] = []
b1 = Book.objects.create(name='Snow Crash')
self.assertEqual(data, [
"pre_save signal, Snow Crash",
"post_save signal, Snow Crash",
"Is created"
])
data[:] = []
# Assigning and removing to/from m2m shouldn't generate an m2m signal.
b1.authors.set([a1])
self.assertEqual(data, [])
b1.authors.set([])
self.assertEqual(data, [])
finally:
signals.pre_save.disconnect(pre_save_handler)
signals.post_save.disconnect(post_save_handler)
signals.pre_delete.disconnect(pre_delete_handler)
signals.post_delete.disconnect(post_delete_handler)
def test_disconnect_in_dispatch(self):
"""
Test that signals that disconnect when being called don't mess future
dispatching.
"""
class Handler(object):
def __init__(self, param):
self.param = param
self._run = False
def __call__(self, signal, sender, **kwargs):
self._run = True
signal.disconnect(receiver=self, sender=sender)
a, b = Handler(1), Handler(2)
signals.post_save.connect(a, sender=Person, weak=False)
signals.post_save.connect(b, sender=Person, weak=False)
Person.objects.create(first_name='John', last_name='Smith')
self.assertTrue(a._run)
self.assertTrue(b._run)
self.assertEqual(signals.post_save.receivers, [])
@mock.patch('weakref.ref')
def test_lazy_model_signal(self, ref):
def callback(sender, args, **kwargs):
pass
signals.pre_init.connect(callback)
signals.pre_init.disconnect(callback)
self.assertTrue(ref.called)
ref.reset_mock()
signals.pre_init.connect(callback, weak=False)
signals.pre_init.disconnect(callback)
ref.assert_not_called()
class LazyModelRefTest(BaseSignalTest):
def setUp(self):
super(LazyModelRefTest, self).setUp()
self.received = []
def receiver(self, **kwargs):
self.received.append(kwargs)
def test_invalid_sender_model_name(self):
msg = "Invalid model reference 'invalid'. String model references must be of the form 'app_label.ModelName'."
with self.assertRaisesMessage(ValueError, msg):
signals.post_init.connect(self.receiver, sender='invalid')
def test_already_loaded_model(self):
signals.post_init.connect(
self.receiver, sender='signals.Book', weak=False
)
try:
instance = Book()
self.assertEqual(self.received, [{
'signal': signals.post_init,
'sender': Book,
'instance': instance
}])
finally:
signals.post_init.disconnect(self.receiver, sender=Book)
@isolate_apps('signals', kwarg_name='apps')
def test_not_loaded_model(self, apps):
signals.post_init.connect(
self.receiver, sender='signals.Created', weak=False, apps=apps
)
try:
class Created(models.Model):
pass
instance = Created()
self.assertEqual(self.received, [{
'signal': signals.post_init, 'sender': Created, 'instance': instance
}])
finally:
signals.post_init.disconnect(self.receiver, sender=Created)
@isolate_apps('signals', kwarg_name='apps')
def test_disconnect(self, apps):
received = []
def receiver(**kwargs):
received.append(kwargs)
signals.post_init.connect(receiver, sender='signals.Created', apps=apps)
signals.post_init.disconnect(receiver, sender='signals.Created', apps=apps)
class Created(models.Model):
pass
Created()
self.assertEqual(received, [])
def test_register_model_class_senders_immediately(self):
"""
Model signals registered with model classes as senders don't use the
Apps.lazy_model_operation() mechanism.
"""
# Book isn't registered with apps2, so it will linger in
# apps2._pending_operations if ModelSignal does the wrong thing.
apps2 = Apps()
signals.post_init.connect(self.receiver, sender=Book, apps=apps2)
self.assertEqual(list(apps2._pending_operations), [])
|
loic/django
|
tests/signals/tests.py
|
Python
|
bsd-3-clause
| 11,802
|
# -*- coding: UTF-8 -*-
# Copyright (C) 2011 Juan David Ibáñez Palomar <jdavid@itaapy.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# Import from standard library
import urllib, urllib2
# Import from itools
from itools.core import proto_lazy_property
from itools.datatypes import Enumerate, String, Unicode
from itools.gettext import MSG
from itools.web import get_context
# Import from ikaaro
from autoedit import AutoEdit
from config import Configuration
from fields import Field, Select_Field, Text_Field
from folder import Folder
from resource_ import DBResource
from utils import make_stl_template
from widgets import TextWidget, RadioWidget, Widget
class CaptchaFieldML(Text_Field):
multilingual = False
###########################################################################
# ReCaptcha
###########################################################################
class RecaptchaWidget(Widget):
title = MSG(u"Please enter the words below")
public_key = None
template = make_stl_template(
"""
<input type="hidden" name="${name}" value="Check"/>
<script type="text/javascript">
var RecaptchaOptions = {
theme : 'clean'
};
</script>
<script type="text/javascript"
src="http://api.recaptcha.net/challenge?k=${public_key}"/>
<noscript>
<iframe src="http://api.recaptcha.net/noscript?k=${public_key}"
height="300" width="500" frameborder="0"/><br/>
<textarea name="recaptcha_challenge_field" rows="3" cols="40"/>
<input type='hidden' name='recaptcha_response_field'
value='manual_challenge'/>
</noscript>
""")
class RecaptchaDatatype(String):
mandatory = True
private_key = None
def is_valid(cls, value):
context = get_context()
if getattr(context, 'recaptcha_return_code', None) == 'true':
return True
# Get remote ip
remote_ip = context.get_remote_ip() or '127.0.0.1'
# Get Captcha fields
recaptcha_challenge_field = context.get_form_value(
'recaptcha_challenge_field', type=String)
recaptcha_response_field = context.get_form_value(
'recaptcha_response_field', type=String)
# Test if captcha value is valid
params = urllib.urlencode ({
'privatekey': cls.private_key,
'remoteip' : remote_ip,
'challenge': recaptcha_challenge_field,
'response' : recaptcha_response_field,
})
request = urllib2.Request (
url = "http://api-verify.recaptcha.net/verify",
data = params,
headers = {
"Content-type": "application/x-www-form-urlencoded",
"User-agent": "reCAPTCHA Python"
}
)
        httpresp = urllib2.urlopen(request)
        return_values = httpresp.read().splitlines()
        httpresp.close()
context.recaptcha_return_code = return_code = return_values[0]
return return_code == 'true'
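# Illustrative only: the legacy reCAPTCHA verify endpoint answers with a
# plain-text body whose first line is 'true' or 'false' and whose second
# line carries an error code, e.g.
#
#   false
#   incorrect-captcha-sol
#
# which is why only return_values[0] is compared against 'true' above.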
class Captcha_Recaptcha(DBResource):
class_id = 'config-captcha-recaptcha'
class_title = MSG(u'Recaptcha')
class_views = ['edit']
# Fields
public_key = CaptchaFieldML(title=MSG(u"Recaptcha public key"))
private_key = CaptchaFieldML(title=MSG(u"Recaptcha private key"))
# Views
edit = AutoEdit(fields=['public_key', 'private_key'])
# API
def get_widget(self):
return RecaptchaWidget(public_key=self.get_value('public_key'))
def get_datatype(self):
return RecaptchaDatatype(private_key=self.get_value('private_key'))
###########################################################################
# Question Captcha
###########################################################################
class QuestionCaptchaDatatype(Unicode):
mandatory = True
answer = None
def is_valid(cls, value):
return cls.answer == value
class QuestionCaptchaWidget(TextWidget):
title = MSG(u"Please answer the question below:")
question = None
template = make_stl_template("""
${question}
<input type="text" id="${id}" name="${name}" value="${value}"
maxlength="${maxlength}" size="${size}" />""")
class Captcha_Question(DBResource):
class_id = 'config-captcha-question'
class_title = MSG(u'Captcha question')
class_views = ['edit']
# Fields
question = CaptchaFieldML(default=u'2 + 3', title=MSG(u"Question"))
answer = CaptchaFieldML(default=u'5', title=MSG(u"Answer"))
# Views
edit = AutoEdit(fields=['question', 'answer'])
# API
def get_widget(self):
return QuestionCaptchaWidget(question=self.get_value('question'))
def get_datatype(self):
return QuestionCaptchaDatatype(answer=self.get_value('answer'))
###########################################################################
# CaptchaWidget
###########################################################################
class CaptchaDatatype(Unicode):
mandatory = True
def is_valid(cls, value):
root = get_context().root
config_captcha = root.get_resource('config/captcha')
captcha = config_captcha.get_captcha()
datatype = captcha.get_datatype()
return datatype.is_valid(value)
class CaptchaWidget(Widget):
@proto_lazy_property
def title(self):
root = get_context().root
config_captcha = root.get_resource('config/captcha')
captcha = config_captcha.get_captcha()
widget = captcha.get_widget()
return widget.title
def render(self, mode='events'):
root = get_context().root
config_captcha = root.get_resource('config/captcha')
captcha = config_captcha.get_captcha()
widget = captcha.get_widget()
return widget(name=self.name, value=self.value).render()
class Captcha_Field(Field):
required = True
datatype = CaptchaDatatype
widget = CaptchaWidget
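# A hedged usage sketch (the form class and field name are assumptions): a
# contact or registration form can require a captcha simply by declaring
#
#   class ContactForm(AutoForm):
#       captcha = Captcha_Field(title=MSG(u'Captcha'))
#
# the concrete widget and datatype are then resolved through the
# 'config/captcha' resource at request time.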
###########################################################################
# Captcha Config
###########################################################################
class Select_CaptchaWidget(RadioWidget):
template = make_stl_template("""
<stl:block stl:repeat="option options">
<input type="radio" id="${id}-${option/name}" name="${name}"
value="${option/name}" checked="${option/selected}" />
<label for="${id}-${option/name}">
<a href="${option/name}">
${option/value}
</a>
</label>
<br stl:if="not oneline" />
</stl:block>""")
class CaptchaType(Enumerate):
default = 'question'
options = [
{'name': 'question', 'value': MSG(u'Question captcha')},
{'name': 'recaptcha', 'value': MSG(u'Recaptcha')}]
class Captcha(Folder):
class_id = 'config-captcha'
class_title = MSG(u'Captcha')
class_description = MSG(u'Feature to protect from spammers')
class_icon48 = '/ui/ikaaro/icons/48x48/captcha.png'
# Fields
captcha_type = Select_Field(
required=True, title=MSG(u"Captcha type"), datatype=CaptchaType,
widget = Select_CaptchaWidget(has_empty_option=False))
# Views
class_views = ['edit']
edit = AutoEdit(title=MSG(u'Edit captcha'), fields=['captcha_type'])
# Configuration
config_name = 'captcha'
config_group = 'access'
def init_resource(self, **kw):
super(Captcha, self).init_resource(**kw)
# Init several captcha
self.make_resource('question', Captcha_Question)
self.make_resource('recaptcha', Captcha_Recaptcha)
def get_captcha(self):
captcha_type = self.get_value('captcha_type')
return self.get_resource(captcha_type)
# Register captcha
Configuration.register_module(Captcha)
|
hforge/ikaaro
|
ikaaro/config_captcha.py
|
Python
|
gpl-3.0
| 8,495
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import webob
from nova.api.openstack.compute.plugins.v3 import flavors as flavors_api
from nova.api.openstack.compute.views import flavors as flavors_view
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova.compute import flavors
from nova import exception
ALIAS = "flavor-manage"
authorize = extensions.extension_authorizer('compute', 'v3:' + ALIAS)
class FlavorManageController(wsgi.Controller):
"""
The Flavor Lifecycle API controller for the OpenStack API.
"""
_view_builder_class = flavors_view.V3ViewBuilder
def __init__(self):
super(FlavorManageController, self).__init__()
@wsgi.action("delete")
@extensions.expected_errors((404))
def _delete(self, req, id):
context = req.environ['nova.context']
authorize(context)
try:
flavor = flavors.get_flavor_by_flavor_id(
id, ctxt=context, read_deleted="no")
except exception.FlavorNotFound as e:
raise webob.exc.HTTPNotFound(explanation=e.format_message())
flavors.destroy(flavor['name'])
return webob.Response(status_int=204)
@wsgi.action("create")
@extensions.expected_errors((400, 409))
@wsgi.serializers(xml=flavors_api.FlavorTemplate)
def _create(self, req, body):
context = req.environ['nova.context']
authorize(context)
if not self.is_valid_body(body, 'flavor'):
            raise webob.exc.HTTPBadRequest('Invalid request body')
vals = body['flavor']
name = vals.get('name')
flavorid = vals.get('id')
memory = vals.get('ram')
vcpus = vals.get('vcpus')
root_gb = vals.get('disk')
ephemeral_gb = vals.get('ephemeral', 0)
swap = vals.get('swap', 0)
rxtx_factor = vals.get('rxtx_factor', 1.0)
is_public = vals.get('os-flavor-access:is_public', True)
try:
flavor = flavors.create(name, memory, vcpus, root_gb,
ephemeral_gb=ephemeral_gb,
flavorid=flavorid, swap=swap,
rxtx_factor=rxtx_factor,
is_public=is_public)
if not flavor['is_public']:
flavors.add_flavor_access(flavor['flavorid'],
context.project_id, context)
req.cache_db_flavor(flavor)
except (exception.InstanceTypeExists,
exception.InstanceTypeIdExists) as err:
raise webob.exc.HTTPConflict(explanation=err.format_message())
except exception.InvalidInput as exc:
raise webob.exc.HTTPBadRequest(explanation=exc.format_message())
return self._view_builder.show(req, flavor)
class FlavorManage(extensions.V3APIExtensionBase):
"""
Flavor create/delete API support
"""
name = "FlavorManage"
alias = ALIAS
namespace = "http://docs.openstack.org/compute/core/%s/api/v3" % ALIAS
version = 1
def get_controller_extensions(self):
controller = FlavorManageController()
extension = extensions.ControllerExtension(self, 'flavors', controller)
return [extension]
def get_resources(self):
return []
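# Illustrative only (the values are assumptions): a request body consumed by
# FlavorManageController._create() might look like
#
#   {"flavor": {"name": "m1.tiny", "id": "10", "ram": 512, "vcpus": 1,
#               "disk": 1, "ephemeral": 0, "swap": 0,
#               "os-flavor-access:is_public": true}}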
|
ntt-sic/nova
|
nova/api/openstack/compute/plugins/v3/flavor_manage.py
|
Python
|
apache-2.0
| 3,903
|