repo_name stringlengths 6 100 | path stringlengths 4 294 | copies stringlengths 1 5 | size stringlengths 4 6 | content stringlengths 606 896k | license stringclasses 15
values |
|---|---|---|---|---|---|
from django.shortcuts import render_to_response, get_object_or_404
from django.views.decorators.http import require_POST
from django.http import HttpResponseRedirect
from django.db.models.loading import get_model
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext, ugettext_lazy as _
from django.template.context import RequestContext
from django.contrib.auth.decorators import login_required
from attachments.models import Attachment
from attachments.forms import AttachmentForm
def add_url_for_obj(obj):
    """Reverse the URL of the ``add_attachment`` view for ``obj``.

    The URL is derived from the object's app label, model (module) name
    and primary key.
    """
    meta = obj._meta
    url_kwargs = {
        'app_label': meta.app_label,
        'module_name': meta.module_name,
        'pk': obj.pk,
    }
    return reverse('add_attachment', kwargs=url_kwargs)
@require_POST
@login_required
def add_attachment(request, app_label, module_name, pk,
                   template_name='attachments/add.html', extra_context=None):
    """Attach an uploaded file to an arbitrary model instance.

    POST-only, login-required view. Looks up the target model via
    ``app_label``/``module_name``; on an unknown model it silently
    redirects to ``next`` (taken from POST, default ``/``). On a valid
    form the attachment is saved, a user message is flashed and the
    browser is redirected; otherwise the form is re-rendered with errors.

    ``extra_context`` was a mutable default argument (``{}``); it now
    defaults to ``None`` to avoid the shared-dict pitfall while staying
    backward compatible with callers that pass a dict.
    """
    next = request.POST.get('next', '/')
    model = get_model(app_label, module_name)
    if model is None:
        # Unknown model: best-effort redirect rather than a 500.
        return HttpResponseRedirect(next)
    obj = get_object_or_404(model, pk=pk)
    form = AttachmentForm(request.POST, request.FILES)
    if form.is_valid():
        form.save(request, obj)
        request.user.message_set.create(message=ugettext('Your attachment was uploaded.'))
        return HttpResponseRedirect(next)
    else:
        template_context = {
            'form': form,
            'form_url': add_url_for_obj(obj),
            'next': next,
        }
        template_context.update(extra_context or {})
        return render_to_response(template_name, template_context,
                                  RequestContext(request))
@login_required
def delete_attachment(request, attachment_pk):
    """Delete an attachment if the user owns it or may delete foreign ones.

    Redirects to ``next`` (from GET/POST, default ``/``) in every case.
    The success message is now created only when a deletion actually
    happened; the original flashed 'Your attachment was deleted.' even
    when the permission check failed and nothing was removed.
    """
    attachment = get_object_or_404(Attachment, pk=attachment_pk)
    if request.user.has_perm('delete_foreign_attachments') \
       or request.user == attachment.creator:
        attachment.delete()
        request.user.message_set.create(message=ugettext('Your attachment was deleted.'))
    next = request.REQUEST.get('next') or '/'
    return HttpResponseRedirect(next)
| bsd-3-clause |
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# (c) 2018, Ryan Conway (@rylon)
# (c) 2018, Scott Buchanan <sbuchanan@ri.pn> (onepassword.py used as starting point)
# (c) 2018, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
# Ansible module metadata: schema version, maturity level and support channel.
ANSIBLE_METADATA = {'metadata_version': '1.1',
                    'status': ['preview'],
                    'supported_by': 'community'}
DOCUMENTATION = '''
module: onepassword_info
author:
- Ryan Conway (@Rylon)
version_added: "2.7"
requirements:
- C(op) 1Password command line utility. See U(https://support.1password.com/command-line/)
notes:
- Tested with C(op) version 0.5.5
- "Based on the C(onepassword) lookup plugin by Scott Buchanan <sbuchanan@ri.pn>."
- When this module is called with the deprecated C(onepassword_facts) name, potentially sensitive data
from 1Password is returned as Ansible facts. Facts are subject to caching if enabled, which means this
data could be stored in clear text on disk or in a database.
short_description: Gather items from 1Password
description:
- M(onepassword_info) wraps the C(op) command line utility to fetch data about one or more 1Password items.
- A fatal error occurs if any of the items being searched for can not be found.
- Recommend using with the C(no_log) option to avoid logging the values of the secrets being retrieved.
- This module was called C(onepassword_facts) before Ansible 2.9, returning C(ansible_facts).
Note that the M(onepassword_info) module no longer returns C(ansible_facts)!
You must now use the C(register) option to use the facts in other tasks.
options:
search_terms:
type: list
description:
- A list of one or more search terms.
- Each search term can either be a simple string or it can be a dictionary for more control.
- When passing a simple string, I(field) is assumed to be C(password).
- When passing a dictionary, the following fields are available.
suboptions:
name:
type: str
description:
- The name of the 1Password item to search for (required).
field:
type: str
description:
- The name of the field to search for within this item (optional, defaults to "password" (or "document" if the item has an attachment)).
section:
type: str
description:
- The name of a section within this item containing the specified field (optional, will search all sections if not specified).
vault:
type: str
description:
- The name of the particular 1Password vault to search, useful if your 1Password user has access to multiple vaults (optional).
required: True
auto_login:
type: dict
description:
- A dictionary containing authentication details. If this is set, M(onepassword_info) will attempt to sign in to 1Password automatically.
- Without this option, you must have already logged in via the 1Password CLI before running Ansible.
- It is B(highly) recommended to store 1Password credentials in an Ansible Vault. Ensure that the key used to encrypt
the Ansible Vault is equal to or greater in strength than the 1Password master password.
suboptions:
subdomain:
type: str
description:
- 1Password subdomain name (<subdomain>.1password.com).
- If this is not specified, the most recent subdomain will be used.
username:
type: str
description:
- 1Password username.
- Only required for initial sign in.
master_password:
type: str
description:
- The master password for your subdomain.
- This is always required when specifying C(auto_login).
required: True
secret_key:
type: str
description:
- The secret key for your subdomain.
- Only required for initial sign in.
default: {}
required: False
cli_path:
type: path
description: Used to specify the exact path to the C(op) command line interface
required: False
default: 'op'
'''
EXAMPLES = '''
# Gather secrets from 1Password, assuming there is a 'password' field:
- name: Get a password
onepassword_info:
search_terms: My 1Password item
delegate_to: localhost
register: my_1password_item
no_log: true # Don't want to log the secrets to the console!
# Gather secrets from 1Password, with more advanced search terms:
- name: Get a password
onepassword_info:
search_terms:
- name: My 1Password item
field: Custom field name # optional, defaults to 'password'
section: Custom section name # optional, defaults to 'None'
vault: Name of the vault # optional, only necessary if there is more than 1 Vault available
delegate_to: localhost
register: my_1password_item
no_log: True # Don't want to log the secrets to the console!
# Gather secrets combining simple and advanced search terms to retrieve two items, one of which we fetch two
# fields. In the first 'password' is fetched, as a field name is not specified (default behaviour) and in the
# second, 'Custom field name' is fetched, as that is specified explicitly.
- name: Get a password
onepassword_info:
search_terms:
- My 1Password item # 'name' is optional when passing a simple string...
- name: My Other 1Password item # ...but it can also be set for consistency
- name: My 1Password item
field: Custom field name # optional, defaults to 'password'
section: Custom section name # optional, defaults to 'None'
vault: Name of the vault # optional, only necessary if there is more than 1 Vault available
- name: A 1Password item with document attachment
delegate_to: localhost
register: my_1password_item
no_log: true # Don't want to log the secrets to the console!
- name: Debug a password (for example)
debug:
msg: "{{ my_1password_item['onepassword']['My 1Password item'] }}"
'''
RETURN = '''
---
# One or more dictionaries for each matching item from 1Password, along with the appropriate fields.
# This shows the response you would expect to receive from the third example documented above.
onepassword:
description: Dictionary of each 1password item matching the given search terms, shows what would be returned from the third example above.
returned: success
type: dict
sample:
"My 1Password item":
password: the value of this field
Custom field name: the value of this field
"My Other 1Password item":
password: the value of this field
"A 1Password item with document attachment":
document: the contents of the document attached to this item
'''
import errno
import json
import os
import re
from subprocess import Popen, PIPE
from ansible.module_utils._text import to_bytes, to_native
from ansible.module_utils.basic import AnsibleModule
class AnsibleModuleError(Exception):
    """Raised when the ``op`` CLI exits with an unexpected return code.

    ``results`` carries the captured diagnostic payload (typically the
    CLI's stderr) so callers can surface it via ``module.fail_json``.
    """

    def __init__(self, results):
        # Keep the raw payload; str()/args are still populated by
        # BaseException.__new__, matching the original behaviour.
        self.results = results

    def __repr__(self):
        # Expose the payload directly, as the original did.
        return self.results
class OnePasswordInfo(object):
    """Wrapper around the 1Password ``op`` command line utility.

    Handles signing in (reusing an existing session where possible) and
    fetching fields or document attachments for each search term. Relies
    on the module-level ``module`` global that is created in ``main()``.

    Fixes over the original:
      * the "Please run '%s sigin'" error message typo ('sigin' -> 'signin');
      * ``~/.op/config`` is now expanded with ``os.path.expanduser`` before
        the existence check (``os.path.isfile('~/...')`` is always False,
        so the "reuse existing sign-in" branch could never run);
      * ``raise e`` replaced with a bare ``raise`` to preserve the traceback.
    """

    def __init__(self):
        self.cli_path = module.params.get('cli_path')
        self.config_file_path = '~/.op/config'
        self.auto_login = module.params.get('auto_login')
        self.logged_in = False
        self.token = None

        terms = module.params.get('search_terms')
        self.terms = self.parse_search_terms(terms)

    def _run(self, args, expected_rc=0, command_input=None, ignore_errors=False):
        """Execute ``op`` with ``args``; return ``(rc, stdout, stderr)``.

        Raises AnsibleModuleError when the return code differs from
        ``expected_rc`` unless ``ignore_errors`` is set.
        """
        if self.token:
            # Adds the session token to all commands if we're logged in.
            args += [to_bytes('--session=') + self.token]

        command = [self.cli_path] + args
        p = Popen(command, stdout=PIPE, stderr=PIPE, stdin=PIPE)
        out, err = p.communicate(input=command_input)
        rc = p.wait()
        if not ignore_errors and rc != expected_rc:
            raise AnsibleModuleError(to_native(err))
        return rc, out, err

    def _parse_field(self, data_json, item_id, field_name, section_title=None):
        """Extract ``field_name`` from the JSON of one 1Password item.

        Documents are fetched wholesale; otherwise the field is looked up
        directly on ``details``, then in ``details.fields``, then in each
        (matching) section. Fails the module when nothing is found.
        """
        data = json.loads(data_json)

        if ('documentAttributes' in data['details']):
            # This is actually a document, let's fetch the document data instead!
            document = self._run(["get", "document", data['overview']['title']])
            return {'document': document[1].strip()}

        else:
            # This is not a document, let's try to find the requested field

            # Some types of 1Password items have a 'password' field directly alongside the 'fields' attribute,
            # not inside it, so we need to check there first.
            if (field_name in data['details']):
                return {field_name: data['details'][field_name]}

            # Otherwise we continue looking inside the 'fields' attribute for the specified field.
            else:
                if section_title is None:
                    for field_data in data['details'].get('fields', []):
                        if field_data.get('name', '').lower() == field_name.lower():
                            return {field_name: field_data.get('value', '')}

                # Not found it yet, so now lets see if there are any sections defined
                # and search through those for the field. If a section was given, we skip
                # any non-matching sections, otherwise we search them all until we find the field.
                for section_data in data['details'].get('sections', []):
                    if section_title is not None and section_title.lower() != section_data['title'].lower():
                        continue
                    for field_data in section_data.get('fields', []):
                        if field_data.get('t', '').lower() == field_name.lower():
                            return {field_name: field_data.get('v', '')}

        # We will get here if the field could not be found in any section and the item wasn't a document to be downloaded.
        optional_section_title = '' if section_title is None else " in the section '%s'" % section_title
        module.fail_json(msg="Unable to find an item in 1Password named '%s' with the field '%s'%s." % (item_id, field_name, optional_section_title))

    def parse_search_terms(self, terms):
        """Normalize search terms: plain strings become dicts with defaults."""
        processed_terms = []

        for term in terms:
            if not isinstance(term, dict):
                term = {'name': term}

            if 'name' not in term:
                module.fail_json(msg="Missing required 'name' field from search term, got: '%s'" % to_native(term))

            term['field'] = term.get('field', 'password')
            term['section'] = term.get('section', None)
            term['vault'] = term.get('vault', None)

            processed_terms.append(term)

        return processed_terms

    def get_raw(self, item_id, vault=None):
        """Return the raw JSON for one item, optionally scoped to a vault."""
        try:
            args = ["get", "item", item_id]
            if vault is not None:
                args += ['--vault={0}'.format(vault)]
            rc, output, dummy = self._run(args)
            return output

        except Exception as e:
            if re.search(".*not found.*", to_native(e)):
                module.fail_json(msg="Unable to find an item in 1Password named '%s'." % item_id)
            else:
                module.fail_json(msg="Unexpected error attempting to find an item in 1Password named '%s': %s" % (item_id, to_native(e)))

    def get_field(self, item_id, field, section=None, vault=None):
        """Fetch one field (or document) of one item."""
        output = self.get_raw(item_id, vault)
        return self._parse_field(output, item_id, field, section) if output != '' else ''

    def full_login(self):
        """Perform an initial ``op signin`` using the auto_login credentials.

        Requires subdomain, username, secret_key and master_password; fails
        the module when auto_login is missing or incomplete.
        """
        if self.auto_login is not None:
            if None in [self.auto_login.get('subdomain'), self.auto_login.get('username'),
                        self.auto_login.get('secret_key'), self.auto_login.get('master_password')]:
                module.fail_json(msg='Unable to perform initial sign in to 1Password. '
                                     'subdomain, username, secret_key, and master_password are required to perform initial sign in.')

            args = [
                'signin',
                '{0}.1password.com'.format(self.auto_login['subdomain']),
                to_bytes(self.auto_login['username']),
                to_bytes(self.auto_login['secret_key']),
                '--output=raw',
            ]

            try:
                rc, out, err = self._run(args, command_input=to_bytes(self.auto_login['master_password']))
                self.token = out.strip()

            except AnsibleModuleError as e:
                module.fail_json(msg="Failed to perform initial sign in to 1Password: %s" % to_native(e))

        else:
            # BUGFIX: the original message read "sigin".
            module.fail_json(msg="Unable to perform an initial sign in to 1Password. Please run '%s signin' "
                                 "or define credentials in 'auto_login'. See the module documentation for details." % self.cli_path)

    def get_token(self):
        """Obtain a session token, reusing an existing sign-in when possible."""
        # If the config file exists, assume an initial signin has taken place and try basic sign in.
        # BUGFIX: expand '~' -- os.path.isfile('~/.op/config') is always False,
        # so the original unconditionally fell through to full_login().
        if os.path.isfile(os.path.expanduser(self.config_file_path)):

            if self.auto_login is not None:

                # Since we are not currently signed in, master_password is required at a minimum
                if not self.auto_login.get('master_password'):
                    module.fail_json(msg="Unable to sign in to 1Password. 'auto_login.master_password' is required.")

                # Try signing in using the master_password and a subdomain if one is provided
                try:
                    args = ['signin', '--output=raw']

                    if self.auto_login.get('subdomain'):
                        args = ['signin', self.auto_login['subdomain'], '--output=raw']

                    rc, out, err = self._run(args, command_input=to_bytes(self.auto_login['master_password']))
                    self.token = out.strip()

                except AnsibleModuleError:
                    # Basic sign-in failed; retry with full credentials.
                    self.full_login()

            else:
                self.full_login()

        else:
            # Attempt a full sign in since there appears to be no existing sign in
            self.full_login()

    def assert_logged_in(self):
        """Ensure a usable session exists, signing in when necessary."""
        try:
            rc, out, err = self._run(['get', 'account'], ignore_errors=True)
            if rc == 0:
                self.logged_in = True
            if not self.logged_in:
                self.get_token()
        except OSError as e:
            if e.errno == errno.ENOENT:
                module.fail_json(msg="1Password CLI tool '%s' not installed in path on control machine" % self.cli_path)
            # Preserve the original traceback (the original used ``raise e``).
            raise

    def run(self):
        """Fetch every search term; merge multiple fields of the same item."""
        result = {}

        self.assert_logged_in()

        for term in self.terms:
            value = self.get_field(term['name'], term['field'], term['section'], term['vault'])

            if term['name'] in result:
                # If we already have a result for this key, we have to append this result dictionary
                # to the existing one. This is only applicable when there is a single item
                # in 1Password which has two different fields, and we want to retrieve both of them.
                result[term['name']].update(value)
            else:
                # If this is the first result for this key, simply set it.
                result[term['name']] = value

        return result
def main():
    """Module entry point: parse arguments and gather 1Password items."""
    global module

    argument_spec = dict(
        cli_path=dict(type='path', default='op'),
        auto_login=dict(
            type='dict',
            options=dict(
                subdomain=dict(type='str'),
                username=dict(type='str'),
                master_password=dict(required=True, type='str', no_log=True),
                secret_key=dict(type='str', no_log=True),
            ),
            default=None,
        ),
        search_terms=dict(required=True, type='list'),
    )
    module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)

    results = {'onepassword': OnePasswordInfo().run()}

    if module._name == 'onepassword_facts':
        # Deprecated alias: keep returning ansible_facts for old callers.
        module.deprecate("The 'onepassword_facts' module has been renamed to 'onepassword_info'. "
                         "When called with the new name it no longer returns 'ansible_facts'", version='2.13')
        module.exit_json(changed=False, ansible_facts=results)
    else:
        module.exit_json(changed=False, **results)


if __name__ == '__main__':
    main()
| gpl-3.0 |
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration 0001: create the core Board and Story tables."""

    def forwards(self, orm):
        # Adding model 'Board'
        db.create_table(u'core_board', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('name', self.gf('django.db.models.fields.CharField')(max_length=255)),
        ))
        db.send_create_signal(u'core', ['Board'])

        # Adding model 'Story'
        db.create_table(u'core_story', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('name', self.gf('django.db.models.fields.CharField')(max_length=255)),
            # Each story belongs to exactly one board.
            ('board', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['core.Board'])),
        ))
        db.send_create_signal(u'core', ['Story'])

    def backwards(self, orm):
        # Deleting model 'Board'
        db.delete_table(u'core_board')

        # Deleting model 'Story'
        db.delete_table(u'core_story')

    # Frozen ORM state used by South to build the fake ORM passed to
    # forwards()/backwards(); not live model definitions.
    models = {
        u'core.board': {
            'Meta': {'object_name': 'Board'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '255'})
        },
        u'core.story': {
            'Meta': {'object_name': 'Story'},
            'board': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['core.Board']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '255'})
        }
    }
complete_apps = ['core']
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2017 Google
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# ----------------------------------------------------------------------------
#
# *** AUTO GENERATED CODE *** AUTO GENERATED CODE ***
#
# ----------------------------------------------------------------------------
#
# This file is automatically generated by Magic Modules and manual
# changes will be clobbered when the file is regenerated.
#
# Please read more about how to change this file at
# https://www.github.com/GoogleCloudPlatform/magic-modules
#
# ----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
__metaclass__ = type
################################################################################
# Documentation
################################################################################
# Ansible module metadata: schema version, maturity level and support channel.
ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'}
DOCUMENTATION = '''
---
module: gcp_resourcemanager_project_info
description:
- Gather info for GCP Project
short_description: Gather info for GCP Project
version_added: '2.8'
author: Google Inc. (@googlecloudplatform)
requirements:
- python >= 2.6
- requests >= 2.18.4
- google-auth >= 1.3.0
options:
project:
description:
- The Google Cloud Platform project to use.
type: str
auth_kind:
description:
- The type of credential used.
type: str
required: true
choices:
- application
- machineaccount
- serviceaccount
service_account_contents:
description:
- The contents of a Service Account JSON file, either in a dictionary or as a
JSON string that represents it.
type: jsonarg
service_account_file:
description:
- The path of a Service Account JSON file if serviceaccount is selected as type.
type: path
service_account_email:
description:
- An optional service account email address if machineaccount is selected and
the user does not wish to use the default email.
type: str
scopes:
description:
- Array of scopes to be used
type: list
env_type:
description:
- Specifies which Ansible environment you're running this module within.
- This should not be set unless you know what you're doing.
- This only alters the User Agent string for any API requests.
type: str
notes:
- for authentication, you can set service_account_file using the C(gcp_service_account_file)
env variable.
- for authentication, you can set service_account_contents using the C(GCP_SERVICE_ACCOUNT_CONTENTS)
env variable.
- For authentication, you can set service_account_email using the C(GCP_SERVICE_ACCOUNT_EMAIL)
env variable.
- For authentication, you can set auth_kind using the C(GCP_AUTH_KIND) env variable.
- For authentication, you can set scopes using the C(GCP_SCOPES) env variable.
- Environment variables values will only be used if the playbook values are not set.
- The I(service_account_email) and I(service_account_file) options are mutually exclusive.
'''
EXAMPLES = '''
- name: get info on a project
gcp_resourcemanager_project_info:
project: test_project
auth_kind: serviceaccount
service_account_file: "/tmp/auth.pem"
'''
RETURN = '''
resources:
description: List of resources
returned: always
type: complex
contains:
number:
description:
- Number uniquely identifying the project.
returned: success
type: int
lifecycleState:
description:
- The Project lifecycle state.
returned: success
type: str
name:
description:
- 'The user-assigned display name of the Project. It must be 4 to 30 characters.
Allowed characters are: lowercase and uppercase letters, numbers, hyphen,
single-quote, double-quote, space, and exclamation point.'
returned: success
type: str
createTime:
description:
- Time of creation.
returned: success
type: str
labels:
description:
- The labels associated with this Project.
- 'Label keys must be between 1 and 63 characters long and must conform to the
following regular expression: `[a-z]([-a-z0-9]*[a-z0-9])?`.'
- Label values must be between 0 and 63 characters long and must conform to
the regular expression `([a-z]([-a-z0-9]*[a-z0-9])?)?`.
- No more than 256 labels can be associated with a given resource.
- Clients should store labels in a representation such as JSON that does not
depend on specific characters being disallowed .
returned: success
type: dict
parent:
description:
- A parent organization.
returned: success
type: complex
contains:
type:
description:
- Must be organization.
returned: success
type: str
id:
description:
- Id of the organization.
returned: success
type: str
id:
description:
- The unique, user-assigned ID of the Project. It must be 6 to 30 lowercase
letters, digits, or hyphens. It must start with a letter.
- Trailing hyphens are prohibited.
returned: success
type: str
'''
################################################################################
# Imports
################################################################################
from ansible.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest
import json
################################################################################
# Main
################################################################################
def main():
    """Entry point: list all visible GCP projects and exit with the results."""
    module = GcpModule(argument_spec=dict())

    # Fall back to the broadest cloud-platform scope when none was supplied.
    if not module.params['scopes']:
        module.params['scopes'] = ['https://www.googleapis.com/auth/cloud-platform']

    resources = fetch_list(module, collection(module))
    module.exit_json(resources=resources)
def collection(module):
    """Return the Resource Manager project-list endpoint URL.

    The URL template contains no placeholders, so the original
    ``.format(**module.params)`` call was a no-op artifact of code
    generation; the constant is returned directly. The ``module``
    parameter is kept for interface compatibility with generated callers.
    """
    return "https://cloudresourcemanager.googleapis.com/v1/projects"
def fetch_list(module, link):
    """Fetch every project at ``link`` using an authenticated GCP session."""
    session = GcpSession(module, 'resourcemanager')
    return session.list(link, return_if_object, array_name='projects')
def return_if_object(module, response):
    """Decode a JSON API response, failing the module on API errors.

    Returns None for 404 (not found) and 204 (no content) responses,
    otherwise the parsed JSON body.
    """
    # Nothing to decode for "not found" or "no content" responses.
    if response.status_code in (404, 204):
        return None

    try:
        module.raise_for_status(response)
        result = response.json()
    except getattr(json.decoder, 'JSONDecodeError', ValueError) as inst:
        module.fail_json(msg="Invalid JSON response with error: %s" % inst)

    errors = navigate_hash(result, ['error', 'errors'])
    if errors:
        module.fail_json(msg=errors)

    return result


if __name__ == "__main__":
    main()
| gpl-3.0 |
#----------------------------------------------------------
# ir_http modular http routing
#----------------------------------------------------------
import datetime
import hashlib
import logging
import mimetypes
import re
import sys
import werkzeug
import werkzeug.exceptions
import werkzeug.routing
import werkzeug.urls
import werkzeug.utils
import openerp
import openerp.exceptions
import openerp.models
from openerp import http
from openerp.http import request
from openerp.osv import osv, orm
_logger = logging.getLogger(__name__)
UID_PLACEHOLDER = object()
class ModelConverter(werkzeug.routing.BaseConverter):
    """URL converter mapping a numeric id path segment to a browse record.

    ``to_python`` binds the record with UID_PLACEHOLDER; the real uid is
    substituted later in ``ir_http._postprocess_args``.
    """

    def __init__(self, url_map, model=False):
        super(ModelConverter, self).__init__(url_map)
        self.model = model
        self.regex = '([0-9]+)'

    def to_python(self, value):
        m = re.match(self.regex, value)
        return request.registry[self.model].browse(
            request.cr, UID_PLACEHOLDER, int(m.group(1)), context=request.context)

    def to_url(self, value):
        # Return text: werkzeug expects to_url() to produce a string, and
        # record ids are integers (the original returned the raw int).
        return str(value.id)
class ModelsConverter(werkzeug.routing.BaseConverter):
    """URL converter mapping a comma-separated id list to a recordset."""

    def __init__(self, url_map, model=False):
        super(ModelsConverter, self).__init__(url_map)
        self.model = model
        # TODO add support for slug in the form [A-Za-z0-9-] bla-bla-89 -> id 89
        self.regex = '([0-9,]+)'

    def to_python(self, value):
        return request.registry[self.model].browse(request.cr, UID_PLACEHOLDER, [int(i) for i in value.split(',')], context=request.context)

    def to_url(self, value):
        # BUGFIX: ids are integers; str.join() requires strings, so the
        # original ",".join(i.id for i in value) raised TypeError.
        return ",".join(str(i.id) for i in value)
class ir_http(osv.AbstractModel):
    """Modular HTTP routing for OpenERP (Python 2 / werkzeug based).

    Builds the werkzeug routing map from installed modules, authenticates
    requests, dispatches them to controller endpoints and serves binary
    ``ir.attachment`` records as a 404 fallback.
    """
    _name = 'ir.http'
    _description = "HTTP routing"

    def _get_converters(self):
        # Extra werkzeug URL converters available in route definitions.
        return {'model': ModelConverter, 'models': ModelsConverter}

    def _find_handler(self, return_rule=False):
        # Match the current request's environ against the routing map.
        return self.routing_map().bind_to_environ(request.httprequest.environ).match(return_rule=return_rule)

    def _auth_method_user(self):
        # auth="user": require a logged-in session.
        request.uid = request.session.uid
        if not request.uid:
            raise http.SessionExpiredException("Session expired")

    def _auth_method_none(self):
        # auth="none": no uid at all (e.g. database selector pages).
        request.uid = None

    def _auth_method_public(self):
        # auth="public": fall back to the shared 'public_user' record.
        if not request.session.uid:
            dummy, request.uid = self.pool['ir.model.data'].get_object_reference(request.cr, openerp.SUPERUSER_ID, 'base', 'public_user')
        else:
            request.uid = request.session.uid

    def _authenticate(self, auth_method='user'):
        """Validate the session then apply the route's auth method.

        Returns the auth method name; converts unexpected errors into
        AccessDenied so no internal detail leaks to the client.
        """
        try:
            if request.session.uid:
                try:
                    request.session.check_security()
                    # what if error in security.check()
                    #   -> res_users.check()
                    #   -> res_users.check_credentials()
                except (openerp.exceptions.AccessDenied, openerp.http.SessionExpiredException):
                    # All other exceptions mean undetermined status (e.g. connection pool full),
                    # let them bubble up
                    request.session.logout(keep_db=True)
            getattr(self, "_auth_method_%s" % auth_method)()
        except (openerp.exceptions.AccessDenied, openerp.http.SessionExpiredException, werkzeug.exceptions.HTTPException):
            raise
        except Exception:
            _logger.exception("Exception during request Authentication.")
            raise openerp.exceptions.AccessDenied()
        return auth_method

    def _serve_attachment(self):
        """Serve a binary ir.attachment whose url matches the request path.

        Returns a werkzeug Response (possibly a 301 redirect or a 304 via
        conditional-request handling), or a falsy value when no attachment
        matches.
        """
        domain = [('type', '=', 'binary'), ('url', '=', request.httprequest.path)]
        attach = self.pool['ir.attachment'].search_read(
            request.cr, openerp.SUPERUSER_ID, domain,
            ['__last_update', 'datas', 'datas_fname', 'name'],
            context=request.context)
        if attach:
            wdate = attach[0]['__last_update']
            datas = attach[0]['datas'] or ''
            name = attach[0]['name']
            if (not datas and name != request.httprequest.path and
                    name.startswith(('http://', 'https://', '/'))):
                # Attachment is a pointer to another URL: redirect there.
                return werkzeug.utils.redirect(name, 301)
            response = werkzeug.wrappers.Response()
            server_format = openerp.tools.misc.DEFAULT_SERVER_DATETIME_FORMAT
            try:
                response.last_modified = datetime.datetime.strptime(wdate, server_format + '.%f')
            except ValueError:
                # just in case we have a timestamp without microseconds
                response.last_modified = datetime.datetime.strptime(wdate, server_format)
            response.set_etag(hashlib.sha1(datas).hexdigest())
            response.make_conditional(request.httprequest)
            if response.status_code == 304:
                return response
            response.mimetype = (mimetypes.guess_type(attach[0]['datas_fname'] or '')[0] or
                                 'application/octet-stream')
            # 'datas' is stored base64-encoded in the database.
            response.data = datas.decode('base64')
            return response

    def _handle_exception(self, exception):
        """Turn an exception raised during dispatch into an HTTP response."""
        # This is done first as the attachment path may
        # not match any HTTP controller.
        if isinstance(exception, werkzeug.exceptions.HTTPException) and exception.code == 404:
            attach = self._serve_attachment()
            if attach:
                return attach

        # If handle_exception returns something different than None, it will be used as a response
        try:
            return request._handle_exception(exception)
        except openerp.exceptions.AccessDenied:
            return werkzeug.exceptions.Forbidden()

    def _dispatch(self):
        """Route, authenticate and execute the request's controller handler."""
        # locate the controller method
        try:
            rule, arguments = self._find_handler(return_rule=True)
            func = rule.endpoint
        except werkzeug.exceptions.NotFound, e:
            return self._handle_exception(e)

        # check authentication level
        try:
            auth_method = self._authenticate(func.routing["auth"])
        except Exception as e:
            return self._handle_exception(e)

        processing = self._postprocess_args(arguments, rule)
        if processing:
            return processing

        # set and execute handler
        try:
            request.set_handler(func, arguments, auth_method)
            result = request.dispatch()
            if isinstance(result, Exception):
                raise result
        except Exception, e:
            return self._handle_exception(e)

        return result

    def _postprocess_args(self, arguments, rule):
        """ post process arg to set uid on browse records """
        for name, arg in arguments.items():
            if isinstance(arg, orm.browse_record) and arg._uid is UID_PLACEHOLDER:
                # Re-bind the placeholder records (from the URL converters)
                # to the now-authenticated uid.
                arguments[name] = arg.sudo(request.uid)
                try:
                    arg.exists()
                except openerp.models.MissingError:
                    return self._handle_exception(werkzeug.exceptions.NotFound())

    def routing_map(self):
        """Build (once) and return the werkzeug routing map for all installed modules."""
        if not hasattr(self, '_routing_map'):
            _logger.info("Generating routing map")
            cr = request.cr
            m = request.registry.get('ir.module.module')
            ids = m.search(cr, openerp.SUPERUSER_ID, [('state', '=', 'installed'), ('name', '!=', 'web')], context=request.context)
            installed = set(x['name'] for x in m.read(cr, 1, ids, ['name'], context=request.context))
            if openerp.tools.config['test_enable']:
                installed.add(openerp.modules.module.current_test)
            mods = [''] + openerp.conf.server_wide_modules + sorted(installed)
            self._routing_map = http.routing_map(mods, False, converters=self._get_converters())
        return self._routing_map
# NOTE: Python 2 only -- uses the three-expression ``raise type, value, tb`` form.
def convert_exception_to(to_type, with_message=False):
    """ Should only be called from an exception handler. Fetches the current
    exception data from sys.exc_info() and creates a new exception of type
    ``to_type`` with the original traceback.

    If ``with_message`` is ``True``, sets the new exception's message to be
    the stringification of the original exception. If ``False``, does not
    set the new exception's message. Otherwise, uses ``with_message`` as the
    new exception's message.

    :type with_message: str|bool
    """
    etype, original, tb = sys.exc_info()
    try:
        if with_message is False:
            message = None
        elif with_message is True:
            message = str(original)
        else:
            message = str(with_message)

        # Re-raise as ``to_type`` while keeping the original traceback ``tb``.
        raise to_type, message, tb
    except to_type, e:
        # Raising then catching gives us a fully-initialized exception
        # instance carrying the original traceback.
        return e
# vim:et:
| agpl-3.0 |
# Copyright (C) 2002-2006 Python Software Foundation
# Author: Ben Gertzfield, Barry Warsaw
# Contact: email-sig@python.org
"""Header encoding and decoding functionality."""
__all__ = [
'Header',
'decode_header',
'make_header',
]
import re
import binascii
import email.quoprimime
import email.base64mime
from email.errors import HeaderParseError
from email.charset import Charset
NL = '\n'
SPACE = ' '
USPACE = u' '
SPACE8 = ' ' * 8
UEMPTYSTRING = u''
MAXLINELEN = 76
USASCII = Charset('us-ascii')
UTF8 = Charset('utf-8')
# Match encoded-word strings in the form =?charset?q?Hello_World?=
ecre = re.compile(r'''
=\? # literal =?
(?P<charset>[^?]*?) # non-greedy up to the next ? is the charset
\? # literal ?
(?P<encoding>[qb]) # either a "q" or a "b", case insensitive
\? # literal ?
(?P<encoded>.*?) # non-greedy up to the next ?= is the encoded string
\?= # literal ?=
(?=[ \t]|$) # whitespace or the end of the string
''', re.VERBOSE | re.IGNORECASE | re.MULTILINE)
# Field name regexp, including trailing colon, but not separating whitespace,
# according to RFC 2822. Character range is from tilde to exclamation mark.
# For use with .match()
fcre = re.compile(r'[\041-\176]+:$')
# Find a header embedded in a putative header value. Used to check for
# header injection attack.
_embeded_header = re.compile(r'\n[^ \t]+:')
# Helpers
_max_append = email.quoprimime._max_append
def decode_header(header):
    """Decode a message header value without converting charset.

    Returns a list of (decoded_string, charset) pairs containing each of the
    decoded parts of the header.  Charset is None for non-encoded parts of the
    header, otherwise a lower-case string containing the name of the character
    set specified in the encoded string.

    An email.errors.HeaderParseError may be raised when certain decoding error
    occurs (e.g. a base64 decoding exception).
    """
    # If no encoding, just return the header
    header = str(header)
    if not ecre.search(header):
        return [(header, None)]
    decoded = []
    dec = ''
    for line in header.splitlines():
        # This line might not have an encoding in it
        if not ecre.search(line):
            decoded.append((line, None))
            continue
        # ecre.split() yields the text *around* each encoded word plus the
        # three captured groups (charset, encoding, encoded) for each match,
        # so the loop below consumes `parts` in strides of 1 + 3.
        parts = ecre.split(line)
        while parts:
            unenc = parts.pop(0).strip()
            if unenc:
                # Should we continue a long line?
                if decoded and decoded[-1][1] is None:
                    decoded[-1] = (decoded[-1][0] + SPACE + unenc, None)
                else:
                    decoded.append((unenc, None))
            if parts:
                charset, encoding = [s.lower() for s in parts[0:2]]
                encoded = parts[2]
                dec = None
                if encoding == 'q':
                    dec = email.quoprimime.header_decode(encoded)
                elif encoding == 'b':
                    paderr = len(encoded) % 4 # Postel's law: add missing padding
                    if paderr:
                        encoded += '==='[:4 - paderr]
                    try:
                        dec = email.base64mime.decode(encoded)
                    except binascii.Error:
                        # Turn this into a higher level exception.  BAW: Right
                        # now we throw the lower level exception away but
                        # when/if we get exception chaining, we'll preserve it.
                        raise HeaderParseError
                # Unknown encoding letter: keep the raw encoded text.
                if dec is None:
                    dec = encoded
                # Coalesce adjacent fragments that share a charset.
                if decoded and decoded[-1][1] == charset:
                    decoded[-1] = (decoded[-1][0] + dec, decoded[-1][1])
                else:
                    decoded.append((dec, charset))
            del parts[0:3]
    return decoded
def make_header(decoded_seq, maxlinelen=None, header_name=None,
                continuation_ws=' '):
    """Build a Header from decode_header()-style (string, charset) pairs.

    decode_header() takes a header value string and returns a sequence of
    (decoded_string, charset) pairs, where charset is the string name of a
    character set.  This function performs the inverse assembly: each pair
    is appended to a fresh Header instance.  Optional maxlinelen,
    header_name, and continuation_ws are forwarded to the Header
    constructor unchanged.
    """
    header = Header(maxlinelen=maxlinelen, header_name=header_name,
                    continuation_ws=continuation_ws)
    for text, chset in decoded_seq:
        # A charset of None means us-ascii and is understood natively by
        # Header.append(); bare charset names are promoted to Charset objects.
        if chset is not None and not isinstance(chset, Charset):
            chset = Charset(chset)
        header.append(text, chset)
    return header
class Header:
    """A MIME-aware message header that can mix multiple character sets.

    Chunks appended via .append() are kept as (byte_string, Charset) pairs
    and rendered into a folded, RFC 2047 encoded header value by .encode().
    """
    def __init__(self, s=None, charset=None,
                 maxlinelen=None, header_name=None,
                 continuation_ws=' ', errors='strict'):
        """Create a MIME-compliant header that can contain many character sets.

        Optional s is the initial header value.  If None, the initial header
        value is not set.  You can later append to the header with .append()
        method calls.  s may be a byte string or a Unicode string, but see the
        .append() documentation for semantics.

        Optional charset serves two purposes: it has the same meaning as the
        charset argument to the .append() method.  It also sets the default
        character set for all subsequent .append() calls that omit the charset
        argument.  If charset is not provided in the constructor, the us-ascii
        charset is used both as s's initial charset and as the default for
        subsequent .append() calls.

        The maximum line length can be specified explicit via maxlinelen.  For
        splitting the first line to a shorter value (to account for the field
        header which isn't included in s, e.g. `Subject') pass in the name of
        the field in header_name.  The default maxlinelen is 76.

        continuation_ws must be RFC 2822 compliant folding whitespace (usually
        either a space or a hard tab) which will be prepended to continuation
        lines.

        errors is passed through to the .append() call.
        """
        if charset is None:
            charset = USASCII
        if not isinstance(charset, Charset):
            charset = Charset(charset)
        self._charset = charset
        self._continuation_ws = continuation_ws
        # Tabs in the continuation whitespace count as 8 columns when
        # budgeting line lengths.
        cws_expanded_len = len(continuation_ws.replace('\t', SPACE8))
        # BAW: I believe `chunks' and `maxlinelen' should be non-public.
        self._chunks = []
        if s is not None:
            self.append(s, charset, errors)
        if maxlinelen is None:
            maxlinelen = MAXLINELEN
        if header_name is None:
            # We don't know anything about the field header so the first line
            # is the same length as subsequent lines.
            self._firstlinelen = maxlinelen
        else:
            # The first line should be shorter to take into account the field
            # header.  Also subtract off 2 extra for the colon and space.
            self._firstlinelen = maxlinelen - len(header_name) - 2
        # Second and subsequent lines should subtract off the length in
        # columns of the continuation whitespace prefix.
        self._maxlinelen = maxlinelen - cws_expanded_len
    def __str__(self):
        """A synonym for self.encode()."""
        return self.encode()
    def __unicode__(self):
        """Helper for the built-in unicode function."""
        uchunks = []
        lastcs = None
        for s, charset in self._chunks:
            # We must preserve spaces between encoded and non-encoded word
            # boundaries, which means for us we need to add a space when we go
            # from a charset to None/us-ascii, or from None/us-ascii to a
            # charset.  Only do this for the second and subsequent chunks.
            nextcs = charset
            if uchunks:
                if lastcs not in (None, 'us-ascii'):
                    if nextcs in (None, 'us-ascii'):
                        uchunks.append(USPACE)
                        nextcs = None
                elif nextcs not in (None, 'us-ascii'):
                    uchunks.append(USPACE)
            lastcs = nextcs
            uchunks.append(unicode(s, str(charset)))
        return UEMPTYSTRING.join(uchunks)
    # Rich comparison operators for equality only.  BAW: does it make sense to
    # have or explicitly disable <, <=, >, >= operators?
    def __eq__(self, other):
        # other may be a Header or a string.  Both are fine so coerce
        # ourselves to a string, swap the args and do another comparison.
        return other == self.encode()
    def __ne__(self, other):
        return not self == other
    def append(self, s, charset=None, errors='strict'):
        """Append a string to the MIME header.

        Optional charset, if given, should be a Charset instance or the name
        of a character set (which will be converted to a Charset instance).  A
        value of None (the default) means that the charset given in the
        constructor is used.

        s may be a byte string or a Unicode string.  If it is a byte string
        (i.e. isinstance(s, str) is true), then charset is the encoding of
        that byte string, and a UnicodeError will be raised if the string
        cannot be decoded with that charset.  If s is a Unicode string, then
        charset is a hint specifying the character set of the characters in
        the string.  In this case, when producing an RFC 2822 compliant header
        using RFC 2047 rules, the Unicode string will be encoded using the
        following charsets in order: us-ascii, the charset hint, utf-8.  The
        first character set not to provoke a UnicodeError is used.

        Optional `errors' is passed as the third argument to any unicode() or
        ustr.encode() call.
        """
        if charset is None:
            charset = self._charset
        elif not isinstance(charset, Charset):
            charset = Charset(charset)
        # If the charset is our faux 8bit charset, leave the string unchanged
        if charset != '8bit':
            # We need to test that the string can be converted to unicode and
            # back to a byte string, given the input and output codecs of the
            # charset.
            if isinstance(s, str):
                # Possibly raise UnicodeError if the byte string can't be
                # converted to a unicode with the input codec of the charset.
                incodec = charset.input_codec or 'us-ascii'
                ustr = unicode(s, incodec, errors)
                # Now make sure that the unicode could be converted back to a
                # byte string with the output codec, which may be different
                # than the input codec.  Still, use the original byte string.
                outcodec = charset.output_codec or 'us-ascii'
                ustr.encode(outcodec, errors)
            elif isinstance(s, unicode):
                # Now we have to be sure the unicode string can be converted
                # to a byte string with a reasonable output codec.  We want to
                # use the byte string in the chunk.
                for charset in USASCII, charset, UTF8:
                    try:
                        outcodec = charset.output_codec or 'us-ascii'
                        s = s.encode(outcodec, errors)
                        break
                    except UnicodeError:
                        pass
                else:
                    assert False, 'utf-8 conversion failed'
        self._chunks.append((s, charset))
    def _split(self, s, charset, maxlinelen, splitchars):
        # Split up a header safely for use with encode_chunks.
        splittable = charset.to_splittable(s)
        encoded = charset.from_splittable(splittable, True)
        elen = charset.encoded_header_len(encoded)
        # If the line's encoded length fits, just return it
        if elen <= maxlinelen:
            return [(encoded, charset)]
        # If we have undetermined raw 8bit characters sitting in a byte
        # string, we really don't know what the right thing to do is.  We
        # can't really split it because it might be multibyte data which we
        # could break if we split it between pairs.  The least harm seems to
        # be to not split the header at all, but that means they could go out
        # longer than maxlinelen.
        if charset == '8bit':
            return [(s, charset)]
        # BAW: I'm not sure what the right test here is.  What we're trying to
        # do is be faithful to RFC 2822's recommendation that ($2.2.3):
        #
        # "Note: Though structured field bodies are defined in such a way that
        #  folding can take place between many of the lexical tokens (and even
        #  within some of the lexical tokens), folding SHOULD be limited to
        #  placing the CRLF at higher-level syntactic breaks."
        #
        # For now, I can only imagine doing this when the charset is us-ascii,
        # although it's possible that other charsets may also benefit from the
        # higher-level syntactic breaks.
        elif charset == 'us-ascii':
            return self._split_ascii(s, charset, maxlinelen, splitchars)
        # BAW: should we use encoded?
        elif elen == len(s):
            # We can split on _maxlinelen boundaries because we know that the
            # encoding won't change the size of the string
            splitpnt = maxlinelen
            first = charset.from_splittable(splittable[:splitpnt], False)
            last = charset.from_splittable(splittable[splitpnt:], False)
        else:
            # Binary search for split point
            first, last = _binsplit(splittable, charset, maxlinelen)
        # first is of the proper length so just wrap it in the appropriate
        # chrome.  last must be recursively split.
        fsplittable = charset.to_splittable(first)
        fencoded = charset.from_splittable(fsplittable, True)
        chunk = [(fencoded, charset)]
        return chunk + self._split(last, charset, self._maxlinelen, splitchars)
    def _split_ascii(self, s, charset, firstlen, splitchars):
        # Delegate to the module-level helper, then re-attach the charset to
        # each resulting line.
        chunks = _split_ascii(s, firstlen, self._maxlinelen,
                              self._continuation_ws, splitchars)
        return zip(chunks, [charset]*len(chunks))
    def _encode_chunks(self, newchunks, maxlinelen):
        # MIME-encode a header with many different charsets and/or encodings.
        #
        # Given a list of pairs (string, charset), return a MIME-encoded
        # string suitable for use in a header field.  Each pair may have
        # different charsets and/or encodings, and the resulting header will
        # accurately reflect each setting.
        #
        # Each encoding can be email.utils.QP (quoted-printable, for
        # ASCII-like character sets like iso-8859-1), email.utils.BASE64
        # (Base64, for non-ASCII like character sets like KOI8-R and
        # iso-2022-jp), or None (no encoding).
        #
        # Each pair will be represented on a separate line; the resulting
        # string will be in the format:
        #
        # =?charset1?q?Mar=EDa_Gonz=E1lez_Alonso?=\n
        #  =?charset2?b?SvxyZ2VuIEL2aW5n?="
        chunks = []
        for header, charset in newchunks:
            if not header:
                continue
            if charset is None or charset.header_encoding is None:
                s = header
            else:
                s = charset.header_encode(header)
            # Don't add more folding whitespace than necessary
            if chunks and chunks[-1].endswith(' '):
                extra = ''
            else:
                extra = ' '
            _max_append(chunks, s, maxlinelen, extra)
        joiner = NL + self._continuation_ws
        return joiner.join(chunks)
    def encode(self, splitchars=';, '):
        """Encode a message header into an RFC-compliant format.

        There are many issues involved in converting a given string for use in
        an email header.  Only certain character sets are readable in most
        email clients, and as header strings can only contain a subset of
        7-bit ASCII, care must be taken to properly convert and encode (with
        Base64 or quoted-printable) header strings.  In addition, there is a
        75-character length limit on any given encoded header field, so
        line-wrapping must be performed, even with double-byte character sets.

        This method will do its best to convert the string to the correct
        character set used in email, and encode and line wrap it safely with
        the appropriate scheme for that character set.

        If the given charset is not known or an error occurs during
        conversion, this function will return the header untouched.

        Optional splitchars is a string containing characters to split long
        ASCII lines on, in rough support of RFC 2822's `highest level
        syntactic breaks'.  This doesn't affect RFC 2047 encoded lines.
        """
        newchunks = []
        maxlinelen = self._firstlinelen
        lastlen = 0
        for s, charset in self._chunks:
            # The first bit of the next chunk should be just long enough to
            # fill the next line.  Don't forget the space separating the
            # encoded words.
            targetlen = maxlinelen - lastlen - 1
            if targetlen < charset.encoded_header_len(''):
                # Stick it on the next line
                targetlen = maxlinelen
            newchunks += self._split(s, charset, targetlen, splitchars)
            lastchunk, lastcharset = newchunks[-1]
            lastlen = lastcharset.encoded_header_len(lastchunk)
        value = self._encode_chunks(newchunks, maxlinelen)
        # Guard against header-injection: a folded header must never contain
        # what looks like the start of a brand new header field.
        if _embeded_header.search(value):
            raise HeaderParseError("header value appears to contain "
                "an embedded header: {!r}".format(value))
        return value
def _split_ascii(s, firstlen, restlen, continuation_ws, splitchars):
    """Split an ASCII header value into fold-ready lines.

    The first output line may be at most `firstlen` characters, subsequent
    lines at most `restlen`.  Splitting is attempted on each character in
    `splitchars` in order (highest-level syntactic break first).  Returns a
    list of line strings without the continuation whitespace prefix.
    """
    lines = []
    maxlen = firstlen
    for line in s.splitlines():
        # Ignore any leading whitespace (i.e. continuation whitespace) already
        # on the line, since we'll be adding our own.
        line = line.lstrip()
        if len(line) < maxlen:
            lines.append(line)
            maxlen = restlen
            continue
        # Attempt to split the line at the highest-level syntactic break
        # possible.  Note that we don't have a lot of smarts about field
        # syntax; we just try to break on semi-colons, then commas, then
        # whitespace.
        for ch in splitchars:
            if ch in line:
                break
        else:
            # There's nothing useful to split the line on, not even spaces, so
            # just append this line unchanged
            lines.append(line)
            maxlen = restlen
            continue
        # Now split the line on the character plus trailing whitespace
        cre = re.compile(r'%s\s*' % ch)
        if ch in ';,':
            eol = ch
        else:
            eol = ''
        joiner = eol + ' '
        joinlen = len(joiner)
        wslen = len(continuation_ws.replace('\t', SPACE8))
        this = []
        linelen = 0
        for part in cre.split(line):
            # Current accumulated width: parts gathered so far plus the
            # joiner that will sit between each adjacent pair.
            curlen = linelen + max(0, len(this)-1) * joinlen
            partlen = len(part)
            onfirstline = not lines
            # We don't want to split after the field name, if we're on the
            # first line and the field name is present in the header string.
            if ch == ' ' and onfirstline and \
               len(this) == 1 and fcre.match(this[0]):
                this.append(part)
                linelen += partlen
            elif curlen + partlen > maxlen:
                if this:
                    lines.append(joiner.join(this) + eol)
                # If this part is longer than maxlen and we aren't already
                # splitting on whitespace, try to recursively split this line
                # on whitespace.
                if partlen > maxlen and ch != ' ':
                    subl = _split_ascii(part, maxlen, restlen,
                                        continuation_ws, ' ')
                    lines.extend(subl[:-1])
                    this = [subl[-1]]
                else:
                    this = [part]
                linelen = wslen + len(this[-1])
                maxlen = restlen
            else:
                this.append(part)
                linelen += partlen
        # Put any left over parts on a line by themselves
        if this:
            lines.append(joiner.join(this))
    return lines
def _binsplit(splittable, charset, maxlinelen):
i = 0
j = len(splittable)
while i < j:
# Invariants:
# 1. splittable[:k] fits for all k <= i (note that we *assume*,
# at the start, that splittable[:0] fits).
# 2. splittable[:k] does not fit for any k > j (at the start,
# this means we shouldn't look at any k > len(splittable)).
# 3. We don't know about splittable[:k] for k in i+1..j.
# 4. We want to set i to the largest k that fits, with i <= k <= j.
#
m = (i+j+1) >> 1 # ceiling((i+j)/2); i < m <= j
chunk = charset.from_splittable(splittable[:m], True)
chunklen = charset.encoded_header_len(chunk)
if chunklen <= maxlinelen:
# m is acceptable, so is a new lower bound.
i = m
else:
# m is not acceptable, so final i must be < m.
j = m - 1
# i == j. Invariant #1 implies that splittable[:i] fits, and
# invariant #2 implies that splittable[:i+1] does not fit, so i
# is what we're looking for.
first = charset.from_splittable(splittable[:i], False)
last = charset.from_splittable(splittable[i:], False)
return first, last
| mit |
Ricyteach/parmatter | src/parmatter/parmatter.py | 1 | 6022 | import string
import parse as _parse # avoid potential name conflicts with parse methods
# NOTE: All the Formatter docstrings mostly copied from the string docs page (Formatter does
# not have its own docstrings... <sad_face>).
class Formatter():
    '''Re-implementation of `string.Formatter` that forwards every API method
    to the stdlib implementation explicitly (composition pattern).

    Because each hook is a real method on this class rather than being
    inherited, subclasses can override any of them and still reach the stock
    behaviour via super().  Avoid overriding `format`, `vformat`, and
    `_vformat`; customise the finer-grained hooks instead.'''
    def format(self, format_string, *args, **kwargs):
        '''Interpolate positional and keyword arguments into format_string
        using PEP 3101 format-string syntax.

        Formatting behaviour is customised by overriding get_value,
        format_field, check_unused_args, and friends -- not this method.'''
        return string.Formatter.format(self, format_string, *args, **kwargs)
    def vformat(self, format_string, args, kwargs):
        '''Like format(), but takes the arguments pre-packed as a sequence
        and a mapping -- handy when the caller already holds the arguments
        in containers and wants to skip the *args/**kwargs round trip.  It
        tokenises the format string and drives the other API hooks.'''
        return string.Formatter.vformat(self, format_string, args, kwargs)
    def _vformat(self, format_string, args, kwargs, used_args, recursion_depth, auto_arg_index=0):
        '''Recursive workhorse behind vformat().'''
        return string.Formatter._vformat(self, format_string, args, kwargs, used_args, recursion_depth, auto_arg_index)
    def parse(self, format_string):
        '''Tokenise format_string into an iterable of tuples
        (literal_text, field_name, format_spec, conversion).

        Each tuple describes a run of literal text followed by at most one
        replacement field: literal_text is '' when two fields are adjacent,
        and the last three items are all None when no field follows.'''
        return string.Formatter.parse(self, format_string)
    def get_field(self, field_name, args, kwargs):
        '''Resolve a field_name from parse() (e.g. "0[name]" or
        "label.title") to the object it denotes.  Returns (obj, used_key),
        where used_key plays the same role as the key parameter passed to
        get_value().  args and kwargs are those given to vformat().'''
        return string.Formatter.get_field(self, field_name, args, kwargs)
    def get_value(self, key, args, kwargs):
        '''Look up the root object for a replacement field.

        An int key indexes the positional args; a str key indexes kwargs.
        Only the first component of a compound field name arrives here --
        attribute and item access on the result (e.g. the `.name` in
        "0.name") is applied afterwards by get_field() via getattr()
        and indexing.  Raises IndexError or KeyError for unknown keys.'''
        return string.Formatter.get_value(self, key, args, kwargs)
    def check_unused_args(self, used_args, args, kwargs):
        '''Hook for complaining about arguments no field consumed.

        used_args holds every key the format string actually referenced
        (ints for positional, strs for named); compare against args/kwargs
        and raise if the check fails.  The default accepts anything.'''
        string.Formatter.check_unused_args(self, used_args, args, kwargs)
    def format_field(self, value, format_spec):
        '''Render one value with its format spec.  The stock behaviour is
        the global format() built-in; override to customise per-field
        rendering.'''
        return string.Formatter.format_field(self, value, format_spec)
    def convert_field(self, value, conversion):
        '''Apply a conversion flag (from the tuple returned by parse()) to a
        field value; the stock flags are 's' (str), 'r' (repr) and 'a'
        (ascii).'''
        return string.Formatter.convert_field(self, value, conversion)
class Parmatter(Formatter):
    '''A parsing formatter; i.e., a formatter that can also "unformat".

    The various string format API methods can be overridden by child classes
    using super() for convenience.'''
    def unformat(self, format, string, extra_types=None, evaluate_result=True):
        '''Inverse of format. Match my format to the string exactly.

        :param format: the format string to match against
        :param string: the rendered text to parse
        :param extra_types: optional mapping of custom parse type names to
            converter callables; defaults to {'s': str}.  (Fixed: the old
            signature used a mutable default ``dict(s=str)`` that was shared
            across every call -- any caller mutating it would silently
            change the behaviour of all future calls.)
        :param evaluate_result: forwarded to parse.parse()
        :return: a parse.Result or parse.Match instance (or None if there's
            no match).
        '''
        if extra_types is None:
            extra_types = dict(s=str)
        return _parse.parse(format, string, extra_types, evaluate_result)
nemomobile-graveyard/mcompositor | tests/functional/test25.py | 2 | 3087 | #!/usr/bin/python
# Some tests for the splash screen.
#* Test steps
# * show an unmapped application window
# * show a splash screen for it
# * check that the splash screen appeared
# * map the application window
# * check that the splash screen disappeared
# * show a new splash screen with a bogus PID
# * check that the splash screen appeared
# * map a new application window
# * check that the app window is stacked above the splash screen
# * wait for the splash timeout
#* Post-conditions
# * check that the splash screen disappeared
import os, re, sys, time
# Prepare the compositor test environment; bail out if the helper fails.
if os.system('mcompositor-test-init.py'):
    sys.exit(1)
# Dump the current window stack and locate the desktop (home) window; its
# hex window id is used as the stacking-order anchor in every check below.
fd = os.popen('windowstack m')
s = fd.read(5000)
win_re = re.compile('^0x[0-9a-f]+')
home_win = 0
for l in s.splitlines():
    if re.search(' DESKTOP viewable ', l.strip()):
        home_win = win_re.match(l.strip()).group()
if home_win == 0:
    print 'FAIL: desktop not found'
    sys.exit(1)
def check_order(list, test, no_match = 'NO_MATCH'):
    # Verify that the windows named in `list` appear in the compositor's
    # stacking dump in exactly that top-to-bottom order, and that `no_match`
    # never matches a line before the expected windows are all found.
    # On failure this prints a diagnostic and sets the module-global `ret`
    # to 1 rather than raising.
    # NOTE(review): the parameter name shadows the builtin `list`; left
    # unchanged in this documentation-only pass.
    global ret
    print 'Test:', test
    fd = os.popen('windowstack m')
    s = fd.read(10000)
    i = 0
    for l in s.splitlines():
        if re.search('%s ' % list[i], l.strip()):
            print list[i], 'found'
            i += 1
            if i >= len(list):
                break
            continue
        elif re.search('%s ' % no_match, l.strip()):
            print 'FAIL: "%s" matched in "%s" test' % (no_match, test)
            print 'Failed stack:\n', s
            ret = 1
            return
        else:
            # no match, check that no other element matches either
            for j in range(i, len(list)):
                if re.search('%s ' % list[j], l.strip()):
                    print 'FAIL: stacking order is wrong in "%s" test' % test
                    print 'Failed stack:\n', s
                    ret = 1
                    return
    if i < len(list):
        print 'FAIL: windows missing from the stack in "%s" test' % test
        print 'Failed stack:\n', s
        ret = 1
# Main test sequence: exercise the splash screen against a real and a bogus
# PID, checking the stacking order after each step.
# create unmapped application window
fd = os.popen('windowctl eukn')
app1 = fd.readline().strip()
time.sleep(1)
# show a splash screen for it
pid = os.popen('pidof windowctl').readline().strip()
splash = '/usr/share/mcompositor-functional-tests/splash.jpg'
os.popen('manual-splash %s ignored %s %s ""' % (pid, splash, splash))
time.sleep(2)
# `ret` is the process exit code; check_order() flips it to 1 on failure.
ret = 0
check_order(['\'MSplashScreen\'', home_win], 'splash appeared correctly')
# map the application window
os.popen('windowctl M %s' % app1)
time.sleep(1)
# check that splash screen disappeared
check_order([app1, home_win], 'splash disappeared', '\'MSplashScreen\'')
# show a new splash screen with a bogus PID
os.popen('manual-splash %s ignored %s %s ""' % (0, splash, splash))
time.sleep(2)
check_order(['\'MSplashScreen\'', app1, home_win], 'second splash appeared')
# show a new application window
app2 = os.popen('windowctl kn').readline().strip()
time.sleep(1)
check_order([app2, '\'MSplashScreen\'', app1, home_win], 'app2 appeared correctly')
# wait for the splash timeout (2s already waited)
time.sleep(28)
check_order([app2, app1, home_win], 'splash disappeared on timeout',
            '\'MSplashScreen\'')
# cleanup
os.popen('pkill windowctl')
time.sleep(1)
sys.exit(ret)
| lgpl-2.1 |
aabadie/scikit-learn | examples/mixture/plot_gmm_selection.py | 95 | 3310 | """
================================
Gaussian Mixture Model Selection
================================
This example shows that model selection can be performed with
Gaussian Mixture Models using information-theoretic criteria (BIC).
Model selection concerns both the covariance type
and the number of components in the model.
In that case, AIC also provides the right result (not shown to save time),
but BIC is better suited if the problem is to identify the right model.
Unlike Bayesian procedures, such inferences are prior-free.
In that case, the model with 2 components and full covariance
(which corresponds to the true generative model) is selected.
"""
import numpy as np
import itertools
from scipy import linalg
import matplotlib.pyplot as plt
import matplotlib as mpl
from sklearn import mixture
print(__doc__)

# Number of samples per component
n_samples = 500

# Generate random sample, two components
np.random.seed(0)
C = np.array([[0., -0.1], [1.7, .4]])
X = np.r_[np.dot(np.random.randn(n_samples, 2), C),
          .7 * np.random.randn(n_samples, 2) + np.array([-6, 3])]

# Exhaustively fit every (covariance type, component count) combination and
# keep the model with the lowest BIC.
# Fixed: np.infty was removed in NumPy 2.0; np.inf is the canonical spelling.
lowest_bic = np.inf
bic = []
n_components_range = range(1, 7)
cv_types = ['spherical', 'tied', 'diag', 'full']
for cv_type in cv_types:
    for n_components in n_components_range:
        # Fit a Gaussian mixture with EM
        gmm = mixture.GaussianMixture(n_components=n_components,
                                      covariance_type=cv_type)
        gmm.fit(X)
        bic.append(gmm.bic(X))
        if bic[-1] < lowest_bic:
            lowest_bic = bic[-1]
            best_gmm = gmm

bic = np.array(bic)
color_iter = itertools.cycle(['navy', 'turquoise', 'cornflowerblue',
                              'darkorange'])
clf = best_gmm
bars = []

# Plot the BIC scores as grouped bars: one color per covariance type,
# one group per component count.
spl = plt.subplot(2, 1, 1)
for i, (cv_type, color) in enumerate(zip(cv_types, color_iter)):
    xpos = np.array(n_components_range) + .2 * (i - 2)
    bars.append(plt.bar(xpos, bic[i * len(n_components_range):
                                  (i + 1) * len(n_components_range)],
                        width=.2, color=color))
plt.xticks(n_components_range)
plt.ylim([bic.min() * 1.01 - .01 * bic.max(), bic.max()])
plt.title('BIC score per model')
# Mark the winning (lowest-BIC) bar with a star.
xpos = np.mod(bic.argmin(), len(n_components_range)) + .65 +\
    .2 * np.floor(bic.argmin() / len(n_components_range))
plt.text(xpos, bic.min() * 0.97 + .03 * bic.max(), '*', fontsize=14)
spl.set_xlabel('Number of components')
spl.legend([b[0] for b in bars], cv_types)

# Plot the winner: scatter the points per predicted cluster and overlay a
# 2-sigma covariance ellipse for each Gaussian component.
splot = plt.subplot(2, 1, 2)
Y_ = clf.predict(X)
for i, (mean, cov, color) in enumerate(zip(clf.means_, clf.covariances_,
                                           color_iter)):
    v, w = linalg.eigh(cov)
    if not np.any(Y_ == i):
        continue
    plt.scatter(X[Y_ == i, 0], X[Y_ == i, 1], .8, color=color)

    # Plot an ellipse to show the Gaussian component
    angle = np.arctan2(w[0][1], w[0][0])
    angle = 180. * angle / np.pi  # convert to degrees
    v = 2. * np.sqrt(2.) * np.sqrt(v)
    # Fixed: pass `angle` by keyword -- it is keyword-only in modern
    # Matplotlib (positional use was deprecated and then removed).
    ell = mpl.patches.Ellipse(mean, v[0], v[1], angle=180. + angle,
                              color=color)
    ell.set_clip_box(splot.bbox)
    ell.set_alpha(.5)
    splot.add_artist(ell)

plt.xticks(())
plt.yticks(())
plt.title('Selected GMM: full model, 2 components')
plt.subplots_adjust(hspace=.35, bottom=.02)
plt.show()
| bsd-3-clause |
normtown/SickRage | lib/requests/adapters.py | 573 | 16810 | # -*- coding: utf-8 -*-
"""
requests.adapters
~~~~~~~~~~~~~~~~~
This module contains the transport adapters that Requests uses to define
and maintain connections.
"""
import socket
from .models import Response
from .packages.urllib3.poolmanager import PoolManager, proxy_from_url
from .packages.urllib3.response import HTTPResponse
from .packages.urllib3.util import Timeout as TimeoutSauce
from .packages.urllib3.util.retry import Retry
from .compat import urlparse, basestring
from .utils import (DEFAULT_CA_BUNDLE_PATH, get_encoding_from_headers,
prepend_scheme_if_needed, get_auth_from_url, urldefragauth)
from .structures import CaseInsensitiveDict
from .packages.urllib3.exceptions import ConnectTimeoutError
from .packages.urllib3.exceptions import HTTPError as _HTTPError
from .packages.urllib3.exceptions import MaxRetryError
from .packages.urllib3.exceptions import ProxyError as _ProxyError
from .packages.urllib3.exceptions import ProtocolError
from .packages.urllib3.exceptions import ReadTimeoutError
from .packages.urllib3.exceptions import SSLError as _SSLError
from .packages.urllib3.exceptions import ResponseError
from .cookies import extract_cookies_to_jar
from .exceptions import (ConnectionError, ConnectTimeout, ReadTimeout, SSLError,
ProxyError, RetryError)
from .auth import _basic_auth_str
# Module-level defaults for HTTPAdapter's urllib3 connection pooling:
# don't block when a pool is exhausted, cache 10 pools of 10 connections,
# and perform no automatic retries.
DEFAULT_POOLBLOCK = False
DEFAULT_POOLSIZE = 10
DEFAULT_RETRIES = 0
class BaseAdapter(object):
    """The Base Transport Adapter.

    Defines the minimal interface every transport adapter must provide;
    concrete subclasses are expected to implement both ``send`` and
    ``close``.
    """
    def __init__(self):
        super(BaseAdapter, self).__init__()
    def send(self):
        """Dispatch a prepared request. Subclasses must override."""
        raise NotImplementedError
    def close(self):
        """Release any held resources. Subclasses must override."""
        raise NotImplementedError
class HTTPAdapter(BaseAdapter):
    """The built-in HTTP Adapter for urllib3.

    Provides a general-case interface for Requests sessions to contact HTTP and
    HTTPS urls by implementing the Transport Adapter interface. This class will
    usually be created by the :class:`Session <Session>` class under the
    covers.

    :param pool_connections: The number of urllib3 connection pools to cache.
    :param pool_maxsize: The maximum number of connections to save in the pool.
    :param int max_retries: The maximum number of retries each connection
        should attempt. Note, this applies only to failed DNS lookups, socket
        connections and connection timeouts, never to requests where data has
        made it to the server. By default, Requests does not retry failed
        connections. If you need granular control over the conditions under
        which we retry a request, import urllib3's ``Retry`` class and pass
        that instead.
    :param pool_block: Whether the connection pool should block for connections.

    Usage::

      >>> import requests
      >>> s = requests.Session()
      >>> a = requests.adapters.HTTPAdapter(max_retries=3)
      >>> s.mount('http://', a)
    """
    # The attributes that make up an adapter's picklable state; see
    # __getstate__/__setstate__ below.
    __attrs__ = ['max_retries', 'config', '_pool_connections', '_pool_maxsize',
                 '_pool_block']
    def __init__(self, pool_connections=DEFAULT_POOLSIZE,
                 pool_maxsize=DEFAULT_POOLSIZE, max_retries=DEFAULT_RETRIES,
                 pool_block=DEFAULT_POOLBLOCK):
        if max_retries == DEFAULT_RETRIES:
            # Default: perform no retries, and never retry on read errors --
            # a request whose data may already have reached the server must
            # not be replayed implicitly.
            self.max_retries = Retry(0, read=False)
        else:
            # Accept either an int or a preconfigured urllib3 Retry object.
            self.max_retries = Retry.from_int(max_retries)
        self.config = {}
        self.proxy_manager = {}
        super(HTTPAdapter, self).__init__()
        # Remember the pool sizing so the pool can be rebuilt after
        # unpickling (see __setstate__).
        self._pool_connections = pool_connections
        self._pool_maxsize = pool_maxsize
        self._pool_block = pool_block
        self.init_poolmanager(pool_connections, pool_maxsize, block=pool_block)
def __getstate__(self):
return dict((attr, getattr(self, attr, None)) for attr in
self.__attrs__)
    def __setstate__(self, state):
        # Can't handle by adding 'proxy_manager' to self.__attrs__ because
        # self.poolmanager uses a lambda function, which isn't pickleable.
        self.proxy_manager = {}
        self.config = {}
        # Restore the pickled attributes, then rebuild the (unpicklable)
        # connection pool from the saved sizing parameters.
        for attr, value in state.items():
            setattr(self, attr, value)
        self.init_poolmanager(self._pool_connections, self._pool_maxsize,
                              block=self._pool_block)
    def init_poolmanager(self, connections, maxsize, block=DEFAULT_POOLBLOCK, **pool_kwargs):
        """Initializes a urllib3 PoolManager.

        This method should not be called from user code, and is only
        exposed for use when subclassing the
        :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.

        :param connections: The number of urllib3 connection pools to cache.
        :param maxsize: The maximum number of connections to save in the pool.
        :param block: Block when no free connections are available.
        :param pool_kwargs: Extra keyword arguments used to initialize the Pool Manager.
        """
        # save these values for pickling
        self._pool_connections = connections
        self._pool_maxsize = maxsize
        self._pool_block = block
        # strict=True is passed through to httplib (Python 2) — presumably to
        # reject malformed status lines; confirm against urllib3 docs.
        self.poolmanager = PoolManager(num_pools=connections, maxsize=maxsize,
                                       block=block, strict=True, **pool_kwargs)
def proxy_manager_for(self, proxy, **proxy_kwargs):
"""Return urllib3 ProxyManager for the given proxy.
This method should not be called from user code, and is only
exposed for use when subclassing the
:class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
:param proxy: The proxy to return a urllib3 ProxyManager for.
:param proxy_kwargs: Extra keyword arguments used to configure the Proxy Manager.
:returns: ProxyManager
"""
if not proxy in self.proxy_manager:
proxy_headers = self.proxy_headers(proxy)
self.proxy_manager[proxy] = proxy_from_url(
proxy,
proxy_headers=proxy_headers,
num_pools=self._pool_connections,
maxsize=self._pool_maxsize,
block=self._pool_block,
**proxy_kwargs)
return self.proxy_manager[proxy]
def cert_verify(self, conn, url, verify, cert):
"""Verify a SSL certificate. This method should not be called from user
code, and is only exposed for use when subclassing the
:class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
:param conn: The urllib3 connection object associated with the cert.
:param url: The requested URL.
:param verify: Whether we should actually verify the certificate.
:param cert: The SSL certificate to verify.
"""
if url.lower().startswith('https') and verify:
cert_loc = None
# Allow self-specified cert location.
if verify is not True:
cert_loc = verify
if not cert_loc:
cert_loc = DEFAULT_CA_BUNDLE_PATH
if not cert_loc:
raise Exception("Could not find a suitable SSL CA certificate bundle.")
conn.cert_reqs = 'CERT_REQUIRED'
conn.ca_certs = cert_loc
else:
conn.cert_reqs = 'CERT_NONE'
conn.ca_certs = None
if cert:
if not isinstance(cert, basestring):
conn.cert_file = cert[0]
conn.key_file = cert[1]
else:
conn.cert_file = cert
    def build_response(self, req, resp):
        """Builds a :class:`Response <requests.Response>` object from a urllib3
        response. This should not be called from user code, and is only exposed
        for use when subclassing the
        :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`

        :param req: The :class:`PreparedRequest <PreparedRequest>` used to generate the response.
        :param resp: The urllib3 response object.
        """
        response = Response()

        # Fallback to None if there's no status_code, for whatever reason.
        response.status_code = getattr(resp, 'status', None)

        # Make headers case-insensitive.
        response.headers = CaseInsensitiveDict(getattr(resp, 'headers', {}))

        # Set encoding.
        response.encoding = get_encoding_from_headers(response.headers)
        response.raw = resp
        response.reason = response.raw.reason

        if isinstance(req.url, bytes):
            # Normalise byte URLs to text for the public API.
            response.url = req.url.decode('utf-8')
        else:
            response.url = req.url

        # Add new cookies from the server.
        extract_cookies_to_jar(response.cookies, req, resp)

        # Give the Response some context.
        response.request = req
        response.connection = self

        return response
    def get_connection(self, url, proxies=None):
        """Returns a urllib3 connection for the given URL. This should not be
        called from user code, and is only exposed for use when subclassing the
        :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.

        :param url: The URL to connect to.
        :param proxies: (optional) A Requests-style dictionary of proxies used on this request.
        """
        proxies = proxies or {}
        # Proxies are keyed by the request URL's scheme ('http', 'https', ...).
        proxy = proxies.get(urlparse(url.lower()).scheme)

        if proxy:
            # A schemeless proxy URL defaults to http://.
            proxy = prepend_scheme_if_needed(proxy, 'http')
            proxy_manager = self.proxy_manager_for(proxy)
            conn = proxy_manager.connection_from_url(url)
        else:
            # Only scheme should be lower case
            parsed = urlparse(url)
            url = parsed.geturl()
            conn = self.poolmanager.connection_from_url(url)

        return conn
    def close(self):
        """Disposes of any internal state.

        Currently, this just closes the PoolManager, which closes pooled
        connections.
        """
        # Clearing the manager empties its pools, closing their connections.
        self.poolmanager.clear()
def request_url(self, request, proxies):
"""Obtain the url to use when making the final request.
If the message is being sent through a HTTP proxy, the full URL has to
be used. Otherwise, we should only use the path portion of the URL.
This should not be called from user code, and is only exposed for use
when subclassing the
:class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
:param request: The :class:`PreparedRequest <PreparedRequest>` being sent.
:param proxies: A dictionary of schemes to proxy URLs.
"""
proxies = proxies or {}
scheme = urlparse(request.url).scheme
proxy = proxies.get(scheme)
if proxy and scheme != 'https':
url = urldefragauth(request.url)
else:
url = request.path_url
return url
    def add_headers(self, request, **kwargs):
        """Add any headers needed by the connection. As of v2.0 this does
        nothing by default, but is left for overriding by users that subclass
        the :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.

        This should not be called from user code, and is only exposed for use
        when subclassing the
        :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.

        :param request: The :class:`PreparedRequest <PreparedRequest>` to add headers to.
        :param kwargs: The keyword arguments from the call to send().
        """
        # Intentionally a no-op hook.
        pass
    def proxy_headers(self, proxy):
        """Returns a dictionary of the headers to add to any request sent
        through a proxy. This works with urllib3 magic to ensure that they are
        correctly sent to the proxy, rather than in a tunnelled request if
        CONNECT is being used.

        This should not be called from user code, and is only exposed for use
        when subclassing the
        :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.

        :param proxy: The url of the proxy being used for this request.
        :returns: Dict with a ``Proxy-Authorization`` entry when the proxy
            URL embeds credentials, otherwise empty.
        """
        headers = {}
        username, password = get_auth_from_url(proxy)

        if username and password:
            headers['Proxy-Authorization'] = _basic_auth_str(username,
                                                             password)

        return headers
    def send(self, request, stream=False, timeout=None, verify=True, cert=None, proxies=None):
        """Sends PreparedRequest object. Returns Response object.

        :param request: The :class:`PreparedRequest <PreparedRequest>` being sent.
        :param stream: (optional) Whether to stream the request content.
        :param timeout: (optional) How long to wait for the server to send
            data before giving up, as a float, or a (`connect timeout, read
            timeout <user/advanced.html#timeouts>`_) tuple.
        :type timeout: float or tuple
        :param verify: (optional) Whether to verify SSL certificates.
        :param cert: (optional) Any user-provided SSL certificate to be trusted.
        :param proxies: (optional) The proxies dictionary to apply to the request.
        """

        conn = self.get_connection(request.url, proxies)

        self.cert_verify(conn, request.url, verify, cert)
        url = self.request_url(request, proxies)
        self.add_headers(request)

        # A non-empty body without a Content-Length header must be sent with
        # chunked transfer encoding, driven by hand further below.
        chunked = not (request.body is None or 'Content-Length' in request.headers)

        if isinstance(timeout, tuple):
            try:
                connect, read = timeout
                timeout = TimeoutSauce(connect=connect, read=read)
            except ValueError as e:
                # this may raise a string formatting error.
                err = ("Invalid timeout {0}. Pass a (connect, read) "
                       "timeout tuple, or a single float to set "
                       "both timeouts to the same value".format(timeout))
                raise ValueError(err)
        else:
            # A single value applies to both the connect and read timeouts.
            timeout = TimeoutSauce(connect=timeout, read=timeout)

        try:
            if not chunked:
                # Send the request through urllib3's high-level interface.
                resp = conn.urlopen(
                    method=request.method,
                    url=url,
                    body=request.body,
                    headers=request.headers,
                    redirect=False,
                    assert_same_host=False,
                    preload_content=False,
                    decode_content=False,
                    retries=self.max_retries,
                    timeout=timeout
                )

            # Chunked transfer: drive the low-level httplib connection by hand.
            else:
                if hasattr(conn, 'proxy_pool'):
                    conn = conn.proxy_pool

                low_conn = conn._get_conn(timeout=timeout)

                try:
                    low_conn.putrequest(request.method,
                                        url,
                                        skip_accept_encoding=True)

                    for header, value in request.headers.items():
                        low_conn.putheader(header, value)

                    low_conn.endheaders()

                    for i in request.body:
                        # Each chunk: hex length, CRLF, payload, CRLF.
                        low_conn.send(hex(len(i))[2:].encode('utf-8'))
                        low_conn.send(b'\r\n')
                        low_conn.send(i)
                        low_conn.send(b'\r\n')

                    # Terminating zero-length chunk.
                    low_conn.send(b'0\r\n\r\n')
                    r = low_conn.getresponse()
                    resp = HTTPResponse.from_httplib(
                        r,
                        pool=conn,
                        connection=low_conn,
                        preload_content=False,
                        decode_content=False
                    )
                except:
                    # If we hit any problems here, clean up the connection.
                    # Then, reraise so that we can handle the actual exception.
                    low_conn.close()
                    raise
                else:
                    # All is well, return the connection to the pool.
                    conn._put_conn(low_conn)

        # Map urllib3/socket failures onto requests' public exception types.
        except (ProtocolError, socket.error) as err:
            raise ConnectionError(err, request=request)

        except MaxRetryError as e:
            if isinstance(e.reason, ConnectTimeoutError):
                raise ConnectTimeout(e, request=request)

            if isinstance(e.reason, ResponseError):
                raise RetryError(e, request=request)

            raise ConnectionError(e, request=request)

        except _ProxyError as e:
            raise ProxyError(e)

        except (_SSLError, _HTTPError) as e:
            if isinstance(e, _SSLError):
                raise SSLError(e, request=request)
            elif isinstance(e, ReadTimeoutError):
                raise ReadTimeout(e, request=request)
            else:
                raise

        return self.build_response(request, resp)
| gpl-3.0 |
skavulya/spark-tk | python/sparktk/frame/constructors/import_hive.py | 13 | 2715 | # vim: set encoding=utf-8
# Copyright (c) 2016 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from sparktk.tkcontext import TkContext
def import_hive(hive_query, tc=TkContext.implicit):
    """
    Import data from hive table into frame.

    Define the sql query to retrieve the data from a hive table.

    Only a subset of Hive data types are supported:

        DataType    Support
        ----------  ------------------------------------
        boolean     cast to int

        bigint      native support
        int         native support
        tinyint     cast to int
        smallint    cast to int

        decimal     cast to double, may lose precision
        double      native support
        float       native support

        date        cast to string
        string      native support
        timestamp   cast to string
        varchar     cast to string

        arrays      not supported
        binary      not supported
        char        not supported
        maps        not supported
        structs     not supported
        union       not supported

    Parameters
    ----------

    :param hive_query: (str) hive query to fetch data from table
    :param tc: (TkContext) TK context
    :return: (Frame) returns frame with hive table data

    Examples
    --------
    Load data into frame from a hive table based on hive query

    <skip>
        >>> h_query = "select * from demo_test"
        >>> frame = tc.frame.import_hive(h_query)
        -etc-

        >>> frame.inspect()
        [#]  number  strformat
        ======================
        [0]       1  one
        [1]       2  two
        [2]       3  three
        [3]       4  four
    </skip>
    """
    # ``basestring`` keeps Python 2 str/unicode compatibility.
    if not isinstance(hive_query, basestring):
        raise ValueError("hive query parameter must be a string, but is {0}.".format(type(hive_query)))
    TkContext.validate(tc)

    # Run the query on the JVM side and wrap the resulting Scala frame.
    scala_frame = tc.sc._jvm.org.trustedanalytics.sparktk.frame.internal.constructors.Import.importHive(tc.jutils.get_scala_sc(), hive_query)

    # Imported here to avoid a circular import at module load time —
    # presumably; confirm against sparktk package layout.
    from sparktk.frame.frame import Frame
    return Frame(tc, scala_frame)
| apache-2.0 |
Antiun/connector-ecommerce | connector_ecommerce/invoice.py | 4 | 1863 | # -*- coding: utf-8 -*-
##############################################################################
#
# Author: Joel Grand-Guillaume
# Copyright 2013 Camptocamp SA
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import models, api
from openerp.addons.connector.session import ConnectorSession
from .event import on_invoice_paid, on_invoice_validated
class AccountInvoice(models.Model):
    """Fire connector events when invoices are validated or paid."""
    _inherit = 'account.invoice'

    @api.multi
    def confirm_paid(self):
        """Run the standard payment confirmation, then fire the
        ``on_invoice_paid`` connector event for each invoice."""
        res = super(AccountInvoice, self).confirm_paid()
        session = ConnectorSession(self.env.cr, self.env.uid,
                                   context=self.env.context)
        for record_id in self.ids:
            on_invoice_paid.fire(session, self._name, record_id)
        return res

    @api.multi
    def invoice_validate(self):
        """Run the standard validation, then fire the
        ``on_invoice_validated`` connector event for each invoice."""
        res = super(AccountInvoice, self).invoice_validate()
        session = ConnectorSession(self.env.cr, self.env.uid,
                                   context=self.env.context)
        for record_id in self.ids:
            on_invoice_validated.fire(session, self._name, record_id)
        return res
| agpl-3.0 |
Ms2ger/presto-testo | core/standards/scripts/jstest-futhark/demos/inspector/inspector.py | 8 | 4489 | #!/usr/local/bin/python
from httplib import HTTP
from sys import argv, exit
from os import tmpnam, execl, fork, wait, remove
from re import compile
import cgi
# Python 2 CGI script: fetches the page at ?url=... and echoes it back with
# an inspector <script> injected into <body> and a <base> tag into <head>.
form = cgi.FieldStorage()

if not form.has_key("url"):
    # No URL given: emit the input form.
    print "Content-Type: text/html"
    print
    print "<html><body><form method='GET' action='inspector.py'>URL to inspect: <input name='url'></form></body></html>"
else:
    # Split the URL into host and path (http only).
    url_re = compile("http://(?P<host>[^/]*)(?P<path>/.*)?")
    match = url_re.match(form["url"].value)
    if not match:
        print "Content-Type: text/html"
        print
        print "<html><body>Malformed URL.</body></html>"
    else:
        host = match.group("host")
        path = match.group("path")
        # Fetch the target page with the legacy httplib.HTTP interface.
        http = HTTP(host)
        http.putrequest("GET", path)
        http.putheader("Host", host)
        http.endheaders()
        errcode, errmsg, headers = http.getreply()
        if errcode != 200:
            print "Content-Type: text/html"
            print
            print "<html><body>Failed to load URL: %d</body></html>" % errcode
        else:
            # Pass through the origin's Content-Type when available.
            if headers.has_key("Content-Type"):
                print "Content-Type: " + headers["Content-Type"]
            else:
                print "Content-Type: text/plain"
            print
            file = http.getfile()
            # Regexes matching a complete, beginning or ending <body>/<head>
            # tag on a single line (case-insensitive via character classes).
            whole_body_re = compile("(?P<before>.*)(?P<body>\\<[bB][oO][dD][yY]( [^>]*)?\\>)(?P<after>.*)")
            begin_body_re = compile("(?P<before>.*)(?P<body>\\<[bB][oO][dD][yY])(?P<after>.*)")
            end_body_re = compile("(?P<before>.*)(?P<body>\\>)(?P<after>.*)")
            whole_head_re = compile("(?P<before>.*)(?P<head>\\<[hH][eE][aA][dD]( [^>]*)?\\>)(?P<after>.*)")
            begin_head_re = compile("(?P<before>.*)(?P<head>\\<[hH][eE][aA][dD])(?P<after>.*)")
            end_head_re = compile("(?P<before>.*)(?P<head>\\>)(?P<after>.*)")
            # Directory part of the inspected URL, used for the <base> tag.
            base_href_re = compile("(http://([^/]*/)*)[^/]*")
            # State flags: have we injected into <body>/<head> yet, and are
            # we inside a tag that was opened on an earlier line?
            body_not_found = 1
            body_begin_found = 0
            head_not_found = 1
            head_begin_found = 0
            script = "<script id=\"inspector-script\" src=\"http://YOUR-HOST-HERE/PATH/inspector.js\"></script>"
            base = "<base href=\"" + base_href_re.match(form["url"].value).group(1) + "\">"
            # Stream the page line by line, rewriting the first <head> and
            # <body> tags encountered.
            while 1:
                line = file.readline()
                if head_not_found:
                    whole_match_head = whole_head_re.match(line)
                    begin_match_head = begin_head_re.match(line)
                    end_match_head = end_head_re.match(line)
                if body_not_found:
                    whole_match_body = whole_body_re.match(line)
                    begin_match_body = begin_body_re.match(line)
                    end_match_body = end_body_re.match(line)
                if not line:
                    break
                elif whole_match_head and head_not_found:
                    # <head ...> fully on this line: insert <base> right after it.
                    print whole_match_head.group("before") + whole_match_head.group("head") + base + whole_match_head.group("after")
                    head_not_found = 0
                elif begin_match_head and head_not_found and not head_begin_found:
                    # <head spans multiple lines: wait for the closing '>'.
                    print line
                    head_begin_found = 1
                elif head_begin_found and head_not_found and end_match_head:
                    print end_match_head.group("before") + end_match_head.group("head") + base + end_match_head.group("after")
                    head_not_found = 0
                elif whole_match_body and body_not_found:
                    # <body ...> fully on this line: inject the script (and the
                    # <base> tag too if no <head> was seen).
                    html = script
                    if head_not_found:
                        html += base
                    print whole_match_body.group("before") + whole_match_body.group("body") + html + whole_match_body.group("after")
                    body_not_found = 0
                    head_not_found = 0
                elif begin_match_body and body_not_found and not body_begin_found:
                    print line
                    body_begin_found = 1
                elif body_begin_found and body_not_found and end_match_body:
                    html = script
                    if head_not_found:
                        html += base
                    print end_match_body.group("before") + end_match_body.group("body") + html + end_match_body.group("after")
                    body_not_found = 0
                    head_not_found = 0
                else:
                    # Ordinary line: pass through unchanged.
                    print line
| bsd-3-clause |
peterfpeterson/mantid | Framework/PythonInterface/plugins/algorithms/WorkflowAlgorithms/ReflectometryILLConvertToQ.py | 3 | 12919 | # -*- coding: utf-8 -*-# Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2018 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source,
# Institut Laue - Langevin & CSNS, Institute of High Energy Physics, CAS
# SPDX - License - Identifier: GPL - 3.0 +
import ILL_utilities as utils
from mantid.api import (AlgorithmFactory, DataProcessorAlgorithm, MatrixWorkspaceProperty, WorkspaceUnitValidator)
from mantid.kernel import (Direction, FloatBoundedValidator, Property, StringListValidator)
from mantid.simpleapi import (ConvertToPointData, CreateWorkspace, Divide, GroupToXResolution, Multiply,
ReflectometryMomentumTransfer)
import ReflectometryILL_common as common
import scipy.constants as constants
class Prop:
    """Names of the algorithm's declared properties."""
    CLEANUP = 'Cleanup'
    DIRECT_FOREGROUND_WS = 'DirectForegroundWorkspace'
    GROUPING_FRACTION = 'GroupingQFraction'
    INPUT_WS = 'InputWorkspace'
    OUTPUT_WS = 'OutputWorkspace'
    SUBALG_LOGGING = 'SubalgorithmLogging'
class SubalgLogging:
    """Allowed values for the SubalgorithmLogging property."""
    OFF = 'Logging OFF'
    ON = 'Logging ON'
class ReflectometryILLConvertToQ(DataProcessorAlgorithm):
    """Converts an ILL reflectivity workspace from wavelength to momentum
    transfer (Q), optionally grouping points by Q resolution and dividing
    by the direct beam."""

    def category(self):
        """Return algorithm's categories."""
        return 'ILL\\Reflectometry;Workflow\\Reflectometry'

    def name(self):
        """Return the name of the algorithm."""
        return 'ReflectometryILLConvertToQ'

    def summary(self):
        """Return a summary of the algorithm."""
        return 'Converts a reflectivity workspace from wavelength to momentum transfer.'

    def seeAlso(self):
        """Return a list of related algorithm names."""
        return ['ReflectometryILLPolarizationCor', 'ReflectometryILLPreprocess', 'ReflectometryILLSumForeground',
                'ReflectometryMomentumTransfer', 'ReflectometryILLAutoProcess']

    def version(self):
        """Return the version of the algorithm."""
        return 1

    def PyExec(self):
        """Execute the algorithm."""
        self._subalgLogging = self.getProperty(Prop.SUBALG_LOGGING).value == SubalgLogging.ON
        cleanupMode = self.getProperty(Prop.CLEANUP).value
        self._cleanup = utils.Cleanup(cleanupMode, self._subalgLogging)
        wsPrefix = self.getPropertyValue(Prop.OUTPUT_WS)
        self._names = utils.NameSource(wsPrefix, cleanupMode)
        ws, directWS = self._inputWS()
        ws = self._correctForChopperOpenings(ws, directWS)
        ws = self._convertToMomentumTransfer(ws)
        # When summed in lambda, division by the direct beam happens here in
        # Q space; when summed in Q it was already done upstream.
        sumInLambda = self._sumType(ws.run()) == 'SumInLambda'
        if sumInLambda:
            directWS = self._sameQAndDQ(ws, directWS, 'direct_')
        ws = self._toPointData(ws)
        ws = self._groupPoints(ws)
        if sumInLambda:
            directWS = self._toPointData(directWS, 'direct_')
            directWS = self._groupPoints(directWS, 'direct_')
            ws = self._divideByDirect(ws, directWS)
        self._finalize(ws)

    def PyInit(self):
        """Initialize the input and output properties of the algorithm."""
        positiveFloat = FloatBoundedValidator(lower=0., exclusive=True)
        self.declareProperty(
            MatrixWorkspaceProperty(
                Prop.INPUT_WS,
                defaultValue='',
                direction=Direction.Input,
                validator=WorkspaceUnitValidator('Wavelength')),
            doc='A reflectivity workspace in wavelength to be converted to Q.')
        self.declareProperty(
            MatrixWorkspaceProperty(
                Prop.OUTPUT_WS,
                defaultValue='',
                direction=Direction.Output),
            doc='The input workspace in momentum transfer.')
        self.declareProperty(
            Prop.SUBALG_LOGGING,
            defaultValue=SubalgLogging.OFF,
            validator=StringListValidator([SubalgLogging.OFF, SubalgLogging.ON]),
            doc='Enable or disable child algorithm logging.')
        self.declareProperty(
            Prop.CLEANUP,
            defaultValue=utils.Cleanup.ON,
            validator=StringListValidator([utils.Cleanup.ON, utils.Cleanup.OFF]),
            doc='Enable or disable intermediate workspace cleanup.')
        self.declareProperty(
            MatrixWorkspaceProperty(
                Prop.DIRECT_FOREGROUND_WS,
                defaultValue='',
                direction=Direction.Input,
                validator=WorkspaceUnitValidator('Wavelength')),
            doc='Summed direct beam workspace.')
        self.declareProperty(
            Prop.GROUPING_FRACTION,
            defaultValue=Property.EMPTY_DBL,
            validator=positiveFloat,
            doc='If set, group the output by steps of this fraction multiplied by Q resolution')

    def validateInputs(self):
        """Validate the input properties."""
        issues = dict()
        inputWS = self.getProperty(Prop.INPUT_WS).value
        if inputWS.getNumberHistograms() != 1:
            issues[Prop.INPUT_WS] = 'The workspace should have only a single histogram. Was foreground summation forgotten?'
        directWS = self.getProperty(Prop.DIRECT_FOREGROUND_WS).value
        if directWS.getNumberHistograms() != 1:
            issues[Prop.DIRECT_FOREGROUND_WS] = 'The workspace should have only a single histogram. Was foreground summation forgotten?'
        run = inputWS.run()
        if not run.hasProperty(common.SampleLogs.SUM_TYPE):
            issues[Prop.INPUT_WS] = "'" + common.SampleLogs.SUM_TYPE + "' entry missing in sample logs"
        else:
            sumType = run.getProperty(common.SampleLogs.SUM_TYPE).value
            if sumType not in ['SumInLambda', 'SumInQ']:
                issues[Prop.INPUT_WS] = "Unknown sum type in sample logs: '" + sumType + "'. Allowed values: 'SumInLambda' or 'SumInQ'."
            else:
                if sumType == 'SumInLambda':
                    # Direct beam must share the reflected beam's binning so
                    # the two can be divided point by point later.
                    if directWS.blocksize() != inputWS.blocksize():
                        issues[Prop.DIRECT_FOREGROUND_WS] = 'Number of bins does not match with InputWorkspace.'
                    directXs = directWS.readX(0)
                    inputXs = inputWS.readX(0)
                    if directXs[0] != inputXs[0] or directXs[-1] != inputXs[-1]:
                        issues[Prop.DIRECT_FOREGROUND_WS] = 'Binning does not match with InputWorkspace.'
        return issues

    def _convertToMomentumTransfer(self, ws):
        """Convert the X units of ws to momentum transfer."""
        logs = ws.run()
        reflectedForeground = self._foreground(logs)
        instrumentName = common.instrumentName(ws)
        sumType = logs.getProperty(common.SampleLogs.SUM_TYPE).value
        pixelSize = common.pixelSize(instrumentName)
        detResolution = common.detectorResolution()
        chopperSpeed = common.chopperSpeed(logs, instrumentName)
        chopperOpening = common.chopperOpeningAngle(logs, instrumentName)
        # Instrument-dependent chopper radius: 0.36 for D17, 0.305 otherwise
        # (presumably metres — confirm against instrument definitions).
        chopperRadius = 0.36 if instrumentName == 'D17' else 0.305
        chopperPairDist = common.chopperPairDistance(logs, instrumentName)
        tofBinWidth = self._TOFChannelWidth(logs)
        qWSName = self._names.withSuffix('in_momentum_transfer')
        qWS = ReflectometryMomentumTransfer(
            InputWorkspace=ws,
            OutputWorkspace=qWSName,
            SummationType=sumType,
            ReflectedForeground=reflectedForeground,
            PixelSize=pixelSize,
            DetectorResolution=detResolution,
            ChopperSpeed=chopperSpeed,
            ChopperOpening=chopperOpening,
            ChopperRadius=chopperRadius,
            ChopperPairDistance=chopperPairDist,
            FirstSlitName='slit2',
            FirstSlitSizeSampleLog=common.SampleLogs.SLIT2WIDTH,
            SecondSlitName='slit3',
            SecondSlitSizeSampleLog=common.SampleLogs.SLIT3WIDTH,
            TOFChannelWidth=tofBinWidth,
            EnableLogging=self._subalgLogging)
        self._cleanup.cleanup(ws)
        return qWS

    def _correctForChopperOpenings(self, ws, directWS):
        """Correct reflectivity values if chopper openings between RB and DB differ."""
        def opening(instrumentName, logs, Xs):
            # Effective opening fraction per wavelength bin, derived from the
            # chopper pair distance, speed and opening angle.
            chopperGap = common.chopperPairDistance(logs, instrumentName)
            chopperPeriod = 60. / common.chopperSpeed(logs, instrumentName)
            openingAngle = common.chopperOpeningAngle(logs, instrumentName)
            return chopperGap * constants.m_n / constants.h / chopperPeriod * Xs * 1e-10 + openingAngle / 360.
        instrumentName = common.instrumentName(ws)
        Xbins = ws.readX(0)
        # Bin centres of the wavelength axis.
        Xs = (Xbins[:-1] + Xbins[1:]) / 2.
        reflectedOpening = opening(instrumentName, ws.run(), Xs)
        directOpening = opening(instrumentName, directWS.run(), Xs)
        corFactorWSName = self._names.withSuffix('chopper_opening_correction_factors')
        corFactorWS = CreateWorkspace(
            OutputWorkspace=corFactorWSName,
            DataX=Xbins,
            DataY=directOpening / reflectedOpening,
            UnitX=ws.getAxis(0).getUnit().unitID(),
            ParentWorkspace=ws,
            EnableLogging=self._subalgLogging)
        correctedWSName = self._names.withSuffix('corrected_by_chopper_opening')
        correctedWS = Multiply(
            LHSWorkspace=ws,
            RHSWorkspace=corFactorWS,
            OutputWorkspace=correctedWSName,
            EnableLogging=self._subalgLogging)
        self._cleanup.cleanup(corFactorWS)
        self._cleanup.cleanup(ws)
        return correctedWS

    def _finalize(self, ws):
        """Set OutputWorkspace to ws and clean up."""
        self.setProperty(Prop.OUTPUT_WS, ws)
        self._cleanup.cleanup(ws)
        self._cleanup.finalCleanup()

    def _divideByDirect(self, ws, directWS):
        """Divide ws by the direct beam."""
        reflectivityWSName = self._names.withSuffix('reflectivity')
        reflectivityWS = Divide(
            LHSWorkspace=ws,
            RHSWorkspace=directWS,
            OutputWorkspace=reflectivityWSName,
            EnableLogging=self._subalgLogging)
        self._cleanup.cleanup(directWS)
        reflectivityWS.setYUnit('Reflectivity')
        reflectivityWS.setYUnitLabel('Reflectivity')
        # The X error data is lost in Divide.
        reflectivityWS.setDx(0, ws.readDx(0))
        self._cleanup.cleanup(ws)
        return reflectivityWS

    def _foreground(self, sampleLogs):
        """Return a [start, end] list defining the foreground workspace indices."""
        start = sampleLogs.getProperty(common.SampleLogs.FOREGROUND_START).value
        end = sampleLogs.getProperty(common.SampleLogs.FOREGROUND_END).value
        return [start, end]

    def _groupPoints(self, ws, extraLabel=''):
        """Group bins by Q resolution."""
        # Grouping is optional; skip if the fraction property was left unset.
        if self.getProperty(Prop.GROUPING_FRACTION).isDefault:
            return ws
        qFraction = self.getProperty(Prop.GROUPING_FRACTION).value
        groupedWSName = self._names.withSuffix(extraLabel + 'grouped')
        groupedWS = GroupToXResolution(
            InputWorkspace=ws,
            OutputWorkspace=groupedWSName,
            FractionOfDx=qFraction,
            EnableLogging=self._subalgLogging)
        self._cleanup.cleanup(ws)
        return groupedWS

    def _inputWS(self):
        """Return the input workspace."""
        ws = self.getProperty(Prop.INPUT_WS).value
        self._cleanup.protect(ws)
        directWS = self.getProperty(Prop.DIRECT_FOREGROUND_WS).value
        self._cleanup.protect(directWS)
        return ws, directWS

    def _sameQAndDQ(self, ws, directWS, extraLabel=''):
        """Create a new workspace with Y and E from directWS and X and DX data from ws."""
        qWSName = self._names.withSuffix(extraLabel + 'in_momentum_transfer')
        qWS = CreateWorkspace(
            OutputWorkspace=qWSName,
            DataX=ws.readX(0),
            DataY=directWS.readY(0)[::-1],  # Invert data because wavelength is inversely proportional to Q.
            DataE=directWS.readE(0)[::-1],
            Dx=ws.readDx(0),
            UnitX=ws.getAxis(0).getUnit().unitID(),
            ParentWorkspace=directWS,
            EnableLogging=self._subalgLogging)
        return qWS

    def _sumType(self, logs):
        """Return the sum type applied to ws."""
        return logs.getProperty(common.SampleLogs.SUM_TYPE).value

    def _TOFChannelWidth(self, sampleLogs):
        """Return the time of flight bin width."""
        return sampleLogs.getProperty('PSD.time_of_flight_0').value

    def _toPointData(self, ws, extraLabel=''):
        """Convert ws from binned to point data."""
        pointWSName = self._names.withSuffix(extraLabel + 'as_points')
        pointWS = ConvertToPointData(
            InputWorkspace=ws,
            OutputWorkspace=pointWSName,
            EnableLogging=self._subalgLogging)
        self._cleanup.cleanup(ws)
        return pointWS
AlgorithmFactory.subscribe(ReflectometryILLConvertToQ)
| gpl-3.0 |
christisall/TeamTalk | win-client/3rdParty/src/json/devtools/antglob.py | 247 | 7750 | #!/usr/bin/env python
# encoding: utf-8
# Baptiste Lepilleur, 2009
from dircache import listdir
import re
import fnmatch
import os.path
# These fnmatch expressions are used by default to prune the directory tree
# while doing the recursive traversal in the glob_impl method of glob function.
prune_dirs = '.git .bzr .hg .svn _MTN _darcs CVS SCCS '

# These fnmatch expressions are used by default to exclude files and dirs
# while doing the recursive traversal in the glob_impl method of glob function.
##exclude_pats = prune_pats + '*~ #*# .#* %*% ._* .gitignore .cvsignore vssver.scc .DS_Store'.split()

# These ant_glob expressions are used by default to exclude files and dirs and also prune the directory tree
# while doing the recursive traversal in the glob_impl method of glob function.
default_excludes = '''
**/*~
**/#*#
**/.#*
**/%*%
**/._*
**/CVS
**/CVS/**
**/.cvsignore
**/SCCS
**/SCCS/**
**/vssver.scc
**/.svn
**/.svn/**
**/.git
**/.git/**
**/.gitignore
**/.bzr
**/.bzr/**
**/.hg
**/.hg/**
**/_MTN
**/_MTN/**
**/_darcs
**/_darcs/**
**/.DS_Store '''

# Entry-type bit flags accepted by glob()'s entry_type parameter; combine
# with | to select several kinds of directory entries.
DIR = 1
FILE = 2
DIR_LINK = 4
FILE_LINK = 8
LINKS = DIR_LINK | FILE_LINK
ALL_NO_LINK = DIR | FILE
ALL = DIR | FILE | LINKS

# Tokenizer for ant patterns; alternatives (one capture group each):
# '/**/', '**/', '/**', '*', '/', or a run of literal characters.
_ANT_RE = re.compile( r'(/\*\*/)|(\*\*/)|(/\*\*)|(\*)|(/)|([^\*/]*)' )
def ant_pattern_to_re(ant_pattern):
    """Generates a regular expression from the ant pattern.
    Matching convention:
    **/a: match 'a', 'dir/a', 'dir1/dir2/a'
    a/**/b: match 'a/b', 'a/c/b', 'a/d/c/b'
    *.py: match 'script.py' but not 'a/script.py'
    """
    # Tokenizer: '/**/', '**/', '/**', '*', '/', or a run of literal chars.
    tokenizer = re.compile(r'(/\*\*/)|(\*\*/)|(/\*\*)|(\*)|(/)|([^\*/]*)')
    # A separator is either '/' or the platform-specific path separator.
    sep_rex = r'(?:/|%s)' % re.escape(os.path.sep)
    pieces = ['^']
    expected_start = 0
    for token in tokenizer.finditer(ant_pattern):
        # Tokens must tile the pattern without gaps; a gap means the
        # pattern contains something the tokenizer cannot consume.
        if token.start(0) != expected_start:
            raise ValueError("Invalid ant pattern")
        if token.group(1):    # '/**/': separator plus any number of dirs
            pieces.append(sep_rex + '(?:.*%s)?' % sep_rex)
        elif token.group(2):  # '**/': any (possibly empty) leading dir chain
            pieces.append('(?:.*%s)?' % sep_rex)
        elif token.group(3):  # '/**': separator then anything
            pieces.append(sep_rex + '.*')
        elif token.group(4):  # '*': anything except a separator
            pieces.append('[^/%s]*' % re.escape(os.path.sep))
        elif token.group(5):  # '/': a single separator
            pieces.append(sep_rex)
        else:                 # literal path fragment
            pieces.append(re.escape(token.group(6)))
        expected_start = token.end()
    pieces.append('$')
    return re.compile(''.join(pieces))
def _as_list( l ):
    # Accept either a whitespace-separated pattern string or an already
    # split sequence of patterns.  NOTE: ``basestring`` is Python 2 only.
    if isinstance(l, basestring):
        return l.split()
    return l
def glob(dir_path,
         includes = '**/*',
         excludes = default_excludes,
         entry_type = FILE,
         prune_dirs = prune_dirs,
         max_depth = 25):
    """Recursively collect directory entries under dir_path whose paths
    match at least one include ant pattern and no exclude pattern, and
    whose kind matches the entry_type bit mask (DIR/FILE/DIR_LINK/FILE_LINK).

    NOTE(review): max_depth is accepted but never used — the traversal is
    unbounded; confirm whether depth limiting was intended.
    """
    include_filter = [ant_pattern_to_re(p) for p in _as_list(includes)]
    exclude_filter = [ant_pattern_to_re(p) for p in _as_list(excludes)]
    # Prune patterns and the root path are normalised to the OS separator.
    prune_dirs = [p.replace('/',os.path.sep) for p in _as_list(prune_dirs)]
    dir_path = dir_path.replace('/',os.path.sep)
    entry_type_filter = entry_type

    def is_pruned_dir( dir_name ):
        # True if the directory name matches any fnmatch prune pattern.
        for pattern in prune_dirs:
            if fnmatch.fnmatch( dir_name, pattern ):
                return True
        return False

    def apply_filter( full_path, filter_rexs ):
        """Return True if at least one of the filter regular expression match full_path."""
        for rex in filter_rexs:
            if rex.match( full_path ):
                return True
        return False

    def glob_impl( root_dir_path ):
        # Iterative depth-first traversal using an explicit stack.
        child_dirs = [root_dir_path]
        while child_dirs:
            dir_path = child_dirs.pop()
            for entry in listdir( dir_path ):
                full_path = os.path.join( dir_path, entry )
                is_dir = os.path.isdir( full_path )
                if is_dir and not is_pruned_dir( entry ): # explore child directory ?
                    child_dirs.append( full_path )
                included = apply_filter( full_path, include_filter )
                rejected = apply_filter( full_path, exclude_filter )
                if not included or rejected: # do not include entry ?
                    continue
                link = os.path.islink( full_path )
                is_file = os.path.isfile( full_path )
                if not is_file and not is_dir:
                    # Neither file nor directory (socket, fifo, ...): skip.
                    continue
                if link:
                    entry_type = is_file and FILE_LINK or DIR_LINK
                else:
                    entry_type = is_file and FILE or DIR
                if (entry_type & entry_type_filter) != 0:
                    yield os.path.join( dir_path, entry )
    return list( glob_impl( dir_path ) )
if __name__ == "__main__":
    import unittest
    class AntPatternToRETest(unittest.TestCase):
##        def test_conversion( self ):
##            self.assertEqual( '^somepath$', ant_pattern_to_re( 'somepath' ).pattern )
        def test_matching( self ):
            """Exercise ant_pattern_to_re against accepted and rejected paths."""
            # Each case: (ant pattern, paths that must match, paths that must
            # not match). Note this is Python 2 code (print statements below).
            test_cases = [ ( 'path',
                             ['path'],
                             ['somepath', 'pathsuffix', '/path', '/path'] ),
                           ( '*.py',
                             ['source.py', 'source.ext.py', '.py'],
                             ['path/source.py', '/.py', 'dir.py/z', 'z.pyc', 'z.c'] ),
                           ( '**/path',
                             ['path', '/path', '/a/path', 'c:/a/path', '/a/b/path', '//a/path', '/a/path/b/path'],
                             ['path/', 'a/path/b', 'dir.py/z', 'somepath', 'pathsuffix', 'a/somepath'] ),
                           ( 'path/**',
                             ['path/a', 'path/path/a', 'path//'],
                             ['path', 'somepath/a', 'a/path', 'a/path/a', 'pathsuffix/a'] ),
                           ( '/**/path',
                             ['/path', '/a/path', '/a/b/path/path', '/path/path'],
                             ['path', 'path/', 'a/path', '/pathsuffix', '/somepath'] ),
                           ( 'a/b',
                             ['a/b'],
                             ['somea/b', 'a/bsuffix', 'a/b/c'] ),
                           ( '**/*.py',
                             ['script.py', 'src/script.py', 'a/b/script.py', '/a/b/script.py'],
                             ['script.pyc', 'script.pyo', 'a.py/b'] ),
                           ( 'src/**/*.py',
                             ['src/a.py', 'src/dir/a.py'],
                             ['a/src/a.py', '/src/a.py'] ),
                           ]
            # Duplicate every case with OS-native separators. Iterating over a
            # copy (list(test_cases)) is deliberate: the loop body appends to
            # test_cases.
            for ant_pattern, accepted_matches, rejected_matches in list(test_cases):
                def local_path( paths ):
                    return [ p.replace('/',os.path.sep) for p in paths ]
                test_cases.append( (ant_pattern, local_path(accepted_matches), local_path( rejected_matches )) )
            for ant_pattern, accepted_matches, rejected_matches in test_cases:
                rex = ant_pattern_to_re( ant_pattern )
                print 'ant_pattern:', ant_pattern, ' => ', rex.pattern
                for accepted_match in accepted_matches:
                    print 'Accepted?:', accepted_match
                    self.assert_( rex.match( accepted_match ) is not None )
                for rejected_match in rejected_matches:
                    print 'Rejected?:', rejected_match
                    self.assert_( rex.match( rejected_match ) is None )
    unittest.main()
| apache-2.0 |
SeanHayes/python-social-auth | social/tests/actions/test_login.py | 79 | 2639 | from social.tests.models import User
from social.tests.actions.actions import BaseActionTest
class LoginActionTest(BaseActionTest):
    """End-to-end checks of the social-auth login action (helpers come from
    BaseActionTest: do_login / do_login_with_partial_pipeline)."""
    def test_login(self):
        """A plain login flow completes."""
        self.do_login()
    def test_login_with_partial_pipeline(self):
        """A login that pauses mid-pipeline and is resumed completes."""
        self.do_login_with_partial_pipeline()
    def test_fields_stored_in_session(self):
        """Request params named in FIELDS_STORED_IN_SESSION end up in session."""
        self.strategy.set_settings({
            'SOCIAL_AUTH_FIELDS_STORED_IN_SESSION': ['foo', 'bar']
        })
        self.strategy.set_request_data({'foo': '1', 'bar': '2'}, self.backend)
        self.do_login()
        self.assertEqual(self.strategy.session_get('foo'), '1')
        self.assertEqual(self.strategy.session_get('bar'), '2')
    def test_redirect_value(self):
        """The 'next' request parameter controls the post-login redirect URL."""
        self.strategy.set_request_data({'next': '/after-login'}, self.backend)
        redirect = self.do_login(after_complete_checks=False)
        self.assertEqual(redirect.url, '/after-login')
    def test_login_with_invalid_partial_pipeline(self):
        """Corrupting the stored partial-pipeline backend name before resume
        still lets the login flow finish."""
        def before_complete():
            # Overwrite the saved backend name with a bogus value before the
            # pipeline is resumed.
            partial = self.strategy.session_get('partial_pipeline')
            partial['backend'] = 'foobar'
            self.strategy.session_set('partial_pipeline', partial)
        self.do_login_with_partial_pipeline(before_complete)
    def test_new_user(self):
        """A newly created user is redirected to NEW_USER_REDIRECT_URL."""
        self.strategy.set_settings({
            'SOCIAL_AUTH_NEW_USER_REDIRECT_URL': '/new-user'
        })
        redirect = self.do_login(after_complete_checks=False)
        self.assertEqual(redirect.url, '/new-user')
    def test_inactive_user(self):
        """An inactive user is redirected to INACTIVE_USER_URL."""
        self.strategy.set_settings({
            'SOCIAL_AUTH_INACTIVE_USER_URL': '/inactive'
        })
        User.set_active(False)
        redirect = self.do_login(after_complete_checks=False)
        self.assertEqual(redirect.url, '/inactive')
    def test_invalid_user(self):
        """A pipeline whose final step discards the user (remove_user) sends
        the request to LOGIN_ERROR_URL."""
        self.strategy.set_settings({
            'SOCIAL_AUTH_LOGIN_ERROR_URL': '/error',
            'SOCIAL_AUTH_PIPELINE': (
                'social.pipeline.social_auth.social_details',
                'social.pipeline.social_auth.social_uid',
                'social.pipeline.social_auth.auth_allowed',
                'social.pipeline.social_auth.social_user',
                'social.pipeline.user.get_username',
                'social.pipeline.user.create_user',
                'social.pipeline.social_auth.associate_user',
                'social.pipeline.social_auth.load_extra_data',
                'social.pipeline.user.user_details',
                'social.tests.pipeline.remove_user'
            )
        })
        redirect = self.do_login(after_complete_checks=False)
        self.assertEqual(redirect.url, '/error')
| bsd-3-clause |
Vegasvikk/django-cms | cms/test_utils/project/mti_pluginapp/south_migrations/0001_initial.py | 46 | 4071 | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    # South schema migration (auto-generated): creates the multi-table-
    # inheritance plugin tables for the mti_pluginapp test application.
    def forwards(self, orm):
        """Create the TestPluginAlphaModel and TestPluginBetaModel tables."""
        # Adding model 'TestPluginAlphaModel'
        db.create_table(u'mti_pluginapp_testpluginalphamodel', (
            (u'cmsplugin_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['cms.CMSPlugin'], unique=True, primary_key=True)),
            ('alpha', self.gf('django.db.models.fields.CharField')(default='test plugin alpha', max_length=32)),
        ))
        db.send_create_signal(u'mti_pluginapp', ['TestPluginAlphaModel'])
        # Adding model 'TestPluginBetaModel' (MTI child of the alpha model,
        # hence the OneToOne parent-link primary key).
        db.create_table(u'mti_pluginapp_testpluginbetamodel', (
            (u'testpluginalphamodel_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['mti_pluginapp.TestPluginAlphaModel'], unique=True, primary_key=True)),
            ('beta', self.gf('django.db.models.fields.CharField')(default='test plugin beta', max_length=32)),
        ))
        db.send_create_signal(u'mti_pluginapp', ['TestPluginBetaModel'])
    def backwards(self, orm):
        """Drop the tables created by forwards()."""
        # Deleting model 'TestPluginAlphaModel'
        db.delete_table(u'mti_pluginapp_testpluginalphamodel')
        # Deleting model 'TestPluginBetaModel'
        db.delete_table(u'mti_pluginapp_testpluginbetamodel')
    # Frozen ORM state consumed by South when running this migration.
    models = {
        'cms.cmsplugin': {
            'Meta': {'object_name': 'CMSPlugin'},
            'changed_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'creation_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'depth': ('django.db.models.fields.PositiveIntegerField', [], {}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'language': ('django.db.models.fields.CharField', [], {'max_length': '15', 'db_index': 'True'}),
            'numchild': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
            'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.CMSPlugin']", 'null': 'True', 'blank': 'True'}),
            'path': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
            'placeholder': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Placeholder']", 'null': 'True'}),
            'plugin_type': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'}),
            'position': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'})
        },
        'cms.placeholder': {
            'Meta': {'object_name': 'Placeholder'},
            'default_width': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'slot': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'})
        },
        u'mti_pluginapp.testpluginalphamodel': {
            'Meta': {'object_name': 'TestPluginAlphaModel', '_ormbases': ['cms.CMSPlugin']},
            'alpha': ('django.db.models.fields.CharField', [], {'default': "'test plugin alpha'", 'max_length': '32'}),
            u'cmsplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['cms.CMSPlugin']", 'unique': 'True', 'primary_key': 'True'})
        },
        u'mti_pluginapp.testpluginbetamodel': {
            'Meta': {'object_name': 'TestPluginBetaModel', '_ormbases': [u'mti_pluginapp.TestPluginAlphaModel']},
            'beta': ('django.db.models.fields.CharField', [], {'default': "'test plugin beta'", 'max_length': '32'}),
            u'testpluginalphamodel_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['mti_pluginapp.TestPluginAlphaModel']", 'unique': 'True', 'primary_key': 'True'})
        }
    }
complete_apps = ['mti_pluginapp'] | bsd-3-clause |
izapolsk/integration_tests | cfme/roles.py | 1 | 28996 | from cfme.utils.log import logger
def _remove_page(roles, group, pages):
if group in roles:
for page in pages:
if page in roles[group]:
roles[group].remove(page)
else:
logger.info("Page %s attempted to be removed from role %s, "
"but isn't in there anyway", page, group)
else:
logger.info("Attempted to remove a page from role %s, but role "
"doesn't exist", group)
def _remove_from_all(roles, r_page):
for group in roles:
for page in roles[group]:
if page == r_page:
roles[group].remove(page)
else:
logger.info("Page %s attempted to be removed from role %s, "
"but isn't in there anyway", page, group)
# Maps each default EVM group name to the list of UI feature identifiers its
# members may access.
# Fix: the approver list previously contained the single literal
# 'services_requ,ests' immediately followed (with no comma) by
# 'services_workloads', which Python implicitly concatenated into the bogus
# entry 'services_requ,estsservices_workloads'. It is now the two intended
# entries 'services_requests' and 'services_workloads'.
group_data = {
    'evmgroup-administrator': [
        'control_explorer',
        'control_simulation',
        'control_import_export',
        'control_log',
        'infrastructure_providers',
        'infrastructure_clusters',
        'infrastructure_hosts',
        'infrastructure_virtual_machines',
        'infrastructure_resource_pools',
        'infrastructure_datastores',
        'infrastructure_pxe',
        'infrastructure_requests',
        'clouds_providers',
        'clouds_availability_zones',
        'clouds_flavors',
        'clouds_security_groups',
        'clouds_instances',
        'clouds_stacks',
        'my_settings',
        'tasks',
        'about',
        'dashboard',
        'reports',
        'chargeback',
        'timelines',
        'rss',
        'automate_explorer',
        'automate_simulation',
        'automate_customization',
        'automate_import_export',
        'automate_log',
        'automate_requests',
        'my_services',
        'services_catalogs',
        'services_requests',
        'services_workloads',
        'utilization',
        'planning',
        'bottlenecks'
    ],
    'evmgroup-approver': [
        'control_explorer',
        'control_simulation',
        'control_log',
        'infrastructure_providers',
        'infrastructure_clusters',
        'infrastructure_hosts',
        'infrastructure_virtual_machines',
        'infrastructure_resource_pools',
        'infrastructure_datastores',
        'infrastructure_pxe',
        'infrastructure_requests',
        'clouds_instances',
        'my_settings',
        'tasks',
        'about',
        'dashboard',
        'reports',
        'chargeback',
        'timelines',
        'rss',
        'services_requests',
        'services_workloads'
    ],
    'evmgroup-auditor': [
        'control_explorer',
        'control_simulation',
        'control_log',
        'infrastructure_providers',
        'infrastructure_clusters',
        'infrastructure_hosts',
        'infrastructure_virtual_machines',
        'infrastructure_resource_pools',
        'infrastructure_datastores',
        'infrastructure_pxe',
        'clouds_instances',
        'my_settings',
        'tasks',
        'about',
        'dashboard',
        'reports',
        'chargeback',
        'timelines',
        'rss',
        'services_workloads',
        'utilization',
        'planning',
        'bottlenecks'
    ],
    'evmgroup-desktop': [
        'services_requests',
        'services_workloads',
        'dashboard',
        'infrastructure_config_management',
        'infrastructure_requests',
        'infrastructure_virtual_machines',
        'clouds_instances',
        'my_settings',
        'about'
    ],
    'evmgroup-operator': [
        'services_workloads',
        'dashboard',
        'reports',
        'chargeback',
        'timelines',
        'rss',
        'infrastructure_providers',
        'infrastructure_clusters',
        'infrastructure_hosts',
        'infrastructure_virtual_machines',
        'infrastructure_resource_pools',
        'infrastructure_datastores',
        'infrastructure_pxe',
        'clouds_instances',
        'my_settings',
        'tasks',
        'about'
    ],
    'evmgroup-security': [
        'control_explorer',
        'control_simulation',
        'control_log',
        'infrastructure_providers',
        'infrastructure_clusters',
        'infrastructure_hosts',
        'infrastructure_virtual_machines',
        'infrastructure_resource_pools',
        'infrastructure_datastores',
        'clouds_instances',
        'my_settings',
        'tasks',
        'about',
        'dashboard',
        'reports',
        'chargeback',
        'timelines',
        'rss',
        'services_workloads'
    ],
    'evmgroup-super_administrator': [
        'control_explorer',
        'control_simulation',
        'control_import_export',
        'control_log',
        'infrastructure_providers',
        'infrastructure_clusters',
        'infrastructure_hosts',
        'infrastructure_virtual_machines',
        'infrastructure_resource_pools',
        'infrastructure_datastores',
        'infrastructure_pxe',
        'infrastructure_requests',
        'infrastructure_config_management',
        'clouds_providers',
        'clouds_availability_zones',
        'clouds_flavors',
        'clouds_security_groups',
        'clouds_instances',
        'clouds_tenants',
        'clouds_stacks',
        'my_settings',
        'tasks',
        'configuration',
        'about',
        'dashboard',
        'reports',
        'chargeback',
        'timelines',
        'rss',
        'automate_explorer',
        'automate_simulation',
        'automate_customization',
        'automate_import_export',
        'automate_log',
        'automate_requests',
        'my_services',
        'services_catalogs',
        'services_requests',
        'services_workloads',
        'utilization',
        'planning',
        'bottlenecks'
    ],
    'evmgroup-support': [
        'control_explorer',
        'control_simulation',
        'control_log',
        'infrastructure_providers',
        'infrastructure_clusters',
        'infrastructure_hosts',
        'infrastructure_virtual_machines',
        'infrastructure_resource_pools',
        'infrastructure_datastores',
        'clouds_instances',
        'my_settings',
        'tasks',
        'about',
        'dashboard',
        'reports',
        'chargeback',
        'timelines',
        'rss',
        'services_workloads'
    ],
    'evmgroup-user': [
        'services_workloads',
        'services_requests',
        'dashboard',
        'reports',
        'chargeback',
        'timelines',
        'rss',
        'infrastructure_providers',
        'infrastructure_clusters',
        'infrastructure_hosts',
        'infrastructure_virtual_machines',
        'infrastructure_resource_pools',
        'infrastructure_datastores',
        'infrastructure_requests',
        'clouds_instances',
        'my_settings',
        'tasks',
        'about'
    ],
    'evmgroup-user_limited_self_service': [
        'clouds_instances',
        'services_requests',
        'infrastructure_virtual_machines',
        'infrastructure_requests',
        'my_settings',
        'about'
    ],
    'evmgroup-user_self_service': [
        'clouds_instances',
        'services_requests',
        'infrastructure_config_management',
        'infrastructure_virtual_machines',
        'infrastructure_requests',
        'my_settings',
        'about'
    ],
    'evmgroup-vm_user': [
        'clouds_instances',
        'infrastructure_config_management',
        'infrastructure_virtual_machines',
        'infrastructure_requests',
        'services_requests',
        'services_workloads',
        'my_settings',
        'about'
    ]
}
# Matches structure/string format of VerticalNavigation output for tree, not UI access control tree
# TODO include non-vertical nav RBAC to settings, help
# TODO RBAC goes deeper than veritcal nav, into accordions. example cloud intel -> Reports
# Expected VerticalNavigation tree (5.10z appliances): role name -> top-level
# nav section -> list of items, or a nested dict for two-level sections.
role_access_ui_510z = {
    'evmgroup-super_administrator': {
        'Cloud Intel': ['Dashboard', 'Reports', 'Chargeback', 'Timelines', 'RSS'],
        'Services': ['My Services', 'Catalogs', 'Workloads', 'Requests'],
        'Compute': {
            'Clouds': [
                'Providers',
                'Availability Zones',
                'Host Aggregates',
                'Tenants',
                'Flavors',
                'Instances',
                'Stacks',
                'Key Pairs',
                'Topology'
            ],
            'Infrastructure': [
                'Providers',
                'Clusters',
                'Hosts',
                'Virtual Machines',
                'Resource Pools',
                'Datastores',
                'PXE',
                'Networking',
                'Topology'
            ],
            'Physical Infrastructure': [
                'Overview',
                'Providers',
                'Chassis',
                'Racks',
                'Servers',
                'Storages',
                'Switches',
                'Topology'
            ],
            'Containers': [
                'Overview',
                'Providers',
                'Projects',
                'Routes',
                'Container Services',
                'Replicators',
                'Pods',
                'Containers',
                'Container Nodes',
                'Volumes',
                'Container Builds',
                'Image Registries',
                'Container Images',
                'Container Templates',
                'Topology'
            ],
            'Migration': [
                'Migration Plans',
                'Infrastructure Mappings',
                'Migration Settings'
            ]
        },
        'Configuration': ['Management'],
        'Networks': [
            'Providers',
            'Networks',
            'Subnets',
            'Network Routers',
            'Security Groups',
            'Floating IPs',
            'Network Ports',
            'Load Balancers',
            'Topology'
        ],
        'Storage': {
            'Block Storage': [
                'Managers',
                'Volumes',
                'Volume Snapshots',
                'Volume Backups',
                'Volume Types'
            ],
            'Object Storage': [
                'Managers',
                'Object Store Containers',
                'Object Store Objects'
            ]
        },
        'Control': ['Explorer', 'Simulation', 'Import / Export', 'Log'],
        'Automation': {
            'Ansible': ['Playbooks', 'Repositories', 'Credentials'],
            'Ansible Tower': ['Explorer', 'Jobs'],
            'Automate': [
                'Explorer',
                'Simulation',
                'Generic Objects',
                'Customization',
                'Import / Export',
                'Log',
                'Requests'
            ]
        },
        'Optimize': ['Utilization', 'Planning', 'Bottlenecks'],
        'Monitor': {
            'Alerts': ['Overview', 'All Alerts']
        }
    },
    'evmgroup-administrator': {
        'Automation': {
            'Ansible': ['Credentials', 'Repositories', 'Playbooks'],
            'Ansible Tower': ['Jobs', 'Explorer'],
            'Automate': ['Log', 'Simulation', 'Import / Export', 'Customization', 'Explorer']},
        'Cloud Intel': ['Timelines', 'RSS', 'Dashboard', 'Reports', 'Chargeback'],
        'Compute': {
            'Clouds': ['Flavors', 'Instances', 'Providers', 'Host Aggregates', 'Availability Zones',
                       'Stacks', 'Topology'],
            'Containers': ['Container Nodes', 'Containers', 'Providers', 'Overview',
                           'Image Registries', 'Container Builds', 'Container Services',
                           'Volumes', 'Container Images', 'Routes', 'Pods', 'Replicators',
                           'Projects', 'Topology'],
            'Infrastructure': ['Datastores', 'Networking', 'Providers', 'Virtual Machines', 'Hosts',
                               'Clusters', 'Topology', 'PXE', 'Resource Pools'],
            'Physical Infrastructure': ['Providers', 'Chassis', 'Racks', 'Switches', 'Servers',
                                        'Storages', 'Topology']},
        'Configuration': ['Management'],
        'Control': ['Import / Export', 'Log', 'Explorer', 'Simulation'],
        'Networks': ['Providers', 'Security Groups', 'Floating IPs', 'Networks'],
        'Optimize': ['Bottlenecks', 'Planning', 'Utilization'],
        'Services': ['Requests', 'Workloads', 'Catalogs', 'My Services'],
        'Storage': {
            'Object Storage': ['Object Store Containers', 'Object Store Objects']}
    },
    'evmgroup-approver': {
        'Cloud Intel': ['Timelines', 'RSS', 'Dashboard', 'Reports', 'Chargeback'],
        'Compute': {
            'Clouds': ['Instances'],
            'Infrastructure': ['Datastores', 'Providers', 'Virtual Machines', 'Hosts', 'Clusters',
                               'PXE', 'Resource Pools'],
            'Physical Infrastructure': ['Providers', 'Chassis', 'Racks', 'Switches', 'Servers',
                                        'Storages', 'Topology']},
        'Control': ['Explorer', 'Log', 'Simulation'],
        'Services': ['Requests', 'Workloads', 'My Services'],
    },
    'evmgroup-auditor': {
        'Automation': {
            'Ansible': ['Credentials', 'Repositories', 'Playbooks'],
            'Ansible Tower': ['Explorer']},
        'Cloud Intel': ['Timelines', 'RSS', 'Dashboard', 'Reports', 'Chargeback'],
        'Compute': {
            'Clouds': ['Instances'],
            'Infrastructure': ['Datastores', 'Providers', 'Virtual Machines', 'Hosts', 'Clusters',
                               'Networking', 'PXE', 'Resource Pools'],
            'Physical Infrastructure': ['Providers', 'Chassis', 'Racks', 'Switches', 'Servers',
                                        'Storages', 'Topology']},
        'Control': ['Explorer', 'Log', 'Simulation'],
        'Optimize': ['Bottlenecks', 'Planning', 'Utilization'],
        'Services': ['Workloads', 'My Services']},
    'evmgroup-desktop': {
        'Automation': {
            'Ansible Tower': ['Explorer']},
        'Cloud Intel': ['Dashboard'],
        'Compute': {
            'Clouds': ['Instances'],
            'Infrastructure': ['Virtual Machines'],
            'Physical Infrastructure': ['Providers', 'Chassis', 'Racks', 'Switches', 'Servers',
                                        'Storages', 'Topology']},
        'Configuration': ['Management'],
        'Services': ['Requests', 'Workloads']
    },
    'evmgroup-operator': {
        'Automation': {
            'Ansible': ['Credentials', 'Repositories', 'Playbooks'],
            'Ansible Tower': ['Explorer']},
        'Cloud Intel': ['Timelines', 'RSS', 'Dashboard', 'Reports', 'Chargeback'],
        'Compute': {
            'Clouds': ['Instances'],
            'Infrastructure': ['Datastores', 'Providers', 'Virtual Machines', 'Hosts', 'Clusters',
                               'PXE', 'Resource Pools'],
            'Physical Infrastructure': ['Providers', 'Chassis', 'Racks', 'Switches', 'Servers',
                                        'Storages', 'Topology']},
        'Configuration': ['Management'],
        'Services': ['Workloads', 'My Services']
    },
    'evmgroup-security': {
        'Cloud Intel': ['Timelines', 'RSS', 'Dashboard', 'Reports', 'Chargeback'],
        'Compute': {
            'Clouds': ['Instances'],
            'Infrastructure': ['Datastores', 'Providers', 'Virtual Machines', 'Hosts',
                               'Clusters', 'Resource Pools'],
            'Physical Infrastructure': ['Providers', 'Servers']},
        'Control': ['Explorer', 'Log', 'Simulation'],
        'Services': ['My Services', 'Workloads']
    },
    'evmgroup-support': {
        'Cloud Intel': ['Timelines', 'RSS', 'Dashboard', 'Reports', 'Chargeback'],
        'Compute': {
            'Clouds': ['Instances'],
            'Infrastructure': ['Datastores', 'Providers', 'Virtual Machines', 'Hosts', 'Clusters',
                               'Resource Pools'],
            'Physical Infrastructure': ['Providers', 'Chassis', 'Racks', 'Switches', 'Servers',
                                        'Storages', 'Topology']},
        'Control': ['Explorer', 'Log', 'Simulation'],
        'Services': ['My Services', 'Workloads']
    },
    'evmgroup-user': {
        'Cloud Intel': ['Timelines', 'RSS', 'Dashboard', 'Reports', 'Chargeback'],
        'Compute': {
            'Clouds': ['Instances'],
            'Infrastructure': ['Datastores', 'Providers', 'Virtual Machines', 'Hosts', 'Clusters',
                               'Resource Pools'],
            'Physical Infrastructure': ['Providers', 'Chassis', 'Racks', 'Switches', 'Servers',
                                        'Storages', 'Topology']},
        'Services': ['Requests', 'Workloads', 'My Services']
    },
    'evmgroup-vm_user': {
        'Automation': {
            'Ansible': ['Credentials', 'Repositories', 'Playbooks'],
            'Ansible Tower': ['Explorer']},
        'Compute': {
            'Clouds': ['Instances'],
            'Infrastructure': ['Virtual Machines']},
        'Configuration': ['Management'],
        'Services': ['Requests', 'Workloads'],
    }
}
# Expected navigation tree for the Self-Service UI, keyed by role name.
role_access_ssui = {
    'evmgroup-user_limited_self_service': {
        'Compute': {
            'Clouds': ['Instances'],
            'Infrastructure': ['Virtual Machines']},
        'Services': ['Requests', 'Catalogs', 'My Services']
    },
    'evmgroup-user_self_service': {
        'Automation': {
            'Ansible': ['Credentials', 'Repositories', 'Playbooks'],
            'Ansible Tower': ['Explorer']},
        'Compute': {
            'Clouds': ['Instances'],
            'Infrastructure': ['Virtual Machines'],
            'Physical Infrastructure': ['Providers']},
        'Configuration': ['Management'],
        'Services': ['Requests', 'Catalogs', 'My Services']
    },
}
# Expected VerticalNavigation tree (5.11z appliances); same shape as
# role_access_ui_510z above but with the 5.11 menu layout ('Overview' section,
# top-level 'Migration', no 'Cloud Intel'/'Optimize').
role_access_ui_511z = {
    'evmgroup-super_administrator': {
        'Overview': [
            'Dashboard',
            'Reports',
            'Utilization',
            'Chargeback',
            'Optimization'
        ],
        'Services': ['My Services', 'Catalogs', 'Workloads', 'Requests'],
        'Compute': {
            'Clouds': [
                'Providers',
                'Availability Zones',
                'Host Aggregates',
                'Tenants',
                'Flavors',
                'Instances',
                'Stacks',
                'Key Pairs',
                'Topology'
            ],
            'Infrastructure': [
                'Providers',
                'Clusters',
                'Hosts',
                'Virtual Machines',
                'Resource Pools',
                'Datastores',
                'PXE',
                'Firmware Registry',
                'Networking',
                'Topology'
            ],
            'Physical Infrastructure': [
                'Overview',
                'Providers',
                'Chassis',
                'Racks',
                'Servers',
                'Storages',
                'Switches',
                'Topology'
            ],
            'Containers': [
                'Overview',
                'Providers',
                'Projects',
                'Routes',
                'Container Services',
                'Replicators',
                'Pods',
                'Containers',
                'Container Nodes',
                'Volumes',
                'Container Builds',
                'Image Registries',
                'Container Images',
                'Container Templates',
                'Topology'
            ]
        },
        'Migration': [
            'Migration Plans',
            'Infrastructure Mappings',
            'Migration Settings'
        ],
        'Configuration': ['Management'],
        'Networks': [
            'Providers',
            'Networks',
            'Subnets',
            'Network Routers',
            'Security Groups',
            'Floating IPs',
            'Network Ports',
            'Topology'
        ],
        'Storage': {
            'Block Storage': [
                'Managers',
                'Volumes',
                'Volume Snapshots',
                'Volume Backups',
                'Volume Types'
            ],
            'Object Storage': [
                'Managers',
                'Object Store Containers',
                'Object Store Objects'
            ]
        },
        'Control': ['Explorer', 'Simulation', 'Import / Export', 'Log'],
        'Automation': {
            'Ansible': ['Playbooks', 'Repositories', 'Credentials'],
            'Ansible Tower': ['Explorer', 'Jobs'],
            'Automate': [
                'Explorer',
                'Simulation',
                'Generic Objects',
                'Customization',
                'Import / Export',
                'Log',
                'Requests'
            ]
        },
        'Monitor': {
            'Alerts': ['Overview', 'All Alerts']
        },
    },
    'evmgroup-administrator': {
        'Overview': ['Dashboard', 'Reports', 'Utilization', 'Chargeback'],
        'Services': ['My Services', 'Catalogs', 'Workloads', 'Requests'],
        'Compute': {
            'Clouds': [
                'Providers',
                'Availability Zones',
                'Host Aggregates',
                'Flavors',
                'Instances',
                'Stacks',
                'Topology'
            ],
            'Infrastructure': [
                'Providers',
                'Clusters',
                'Hosts',
                'Virtual Machines',
                'Resource Pools',
                'Datastores',
                'PXE',
                'Networking',
                'Topology'
            ],
            'Physical Infrastructure': [
                'Providers',
                'Chassis',
                'Racks',
                'Servers',
                'Storages',
                'Switches',
                'Topology'
            ],
            'Containers': [
                'Overview',
                'Providers',
                'Projects',
                'Routes',
                'Container Services',
                'Replicators',
                'Pods',
                'Containers',
                'Container Nodes',
                'Volumes',
                'Container Builds',
                'Image Registries',
                'Container Images',
                'Topology'
            ]
        },
        'Configuration': ['Management'],
        'Networks': ['Providers', 'Networks', 'Security Groups', 'Floating IPs'],
        'Storage': {
            'Object Storage': [
                'Object Store Containers',
                'Object Store Objects'
            ]
        },
        'Control': ['Explorer', 'Simulation', 'Import / Export', 'Log'],
        'Automation': {
            'Ansible': ['Playbooks', 'Repositories', 'Credentials'],
            'Ansible Tower': ['Explorer', 'Jobs'],
            'Automate': [
                'Explorer',
                'Simulation',
                'Customization',
                'Import / Export',
                'Log'
            ]
        }
    },
    'evmgroup-approver': {
        'Overview': ['Dashboard', 'Reports', 'Chargeback'],
        'Services': ['My Services', 'Workloads', 'Requests'],
        'Compute': {
            'Clouds': ['Instances'],
            'Infrastructure': [
                'Providers',
                'Clusters',
                'Hosts',
                'Virtual Machines',
                'Resource Pools',
                'Datastores',
                'PXE'
            ],
            'Physical Infrastructure': [
                'Providers',
                'Chassis',
                'Racks',
                'Servers',
                'Storages',
                'Switches',
                'Topology'
            ]
        },
        'Control': ['Explorer', 'Simulation', 'Log']
    },
    'evmgroup-auditor': {
        'Overview': ['Dashboard', 'Reports', 'Utilization', 'Chargeback'],
        'Services': ['My Services', 'Workloads'],
        'Compute': {
            'Clouds': ['Instances'],
            'Infrastructure': [
                'Providers',
                'Clusters',
                'Hosts',
                'Virtual Machines',
                'Resource Pools',
                'Datastores',
                'PXE',
                'Networking'
            ],
            'Physical Infrastructure': [
                'Providers',
                'Chassis',
                'Racks',
                'Servers',
                'Storages',
                'Switches',
                'Topology'
            ]
        },
        'Control': ['Explorer', 'Simulation', 'Log'],
        'Automation': {
            'Ansible': ['Playbooks', 'Repositories', 'Credentials'],
            'Ansible Tower': ['Explorer']
        }
    },
    'evmgroup-desktop': {
        'Overview': ['Dashboard'],
        'Services': ['Workloads', 'Requests'],
        'Compute': {
            'Clouds': ['Instances'],
            'Infrastructure': ['Virtual Machines'],
            'Physical Infrastructure': [
                'Providers',
                'Chassis',
                'Racks',
                'Servers',
                'Storages',
                'Switches',
                'Topology'
            ]
        },
        'Configuration': ['Management'],
        'Automation': {
            'Ansible Tower': ['Explorer']
        }
    },
    'evmgroup-operator': {
        'Overview': ['Dashboard', 'Reports', 'Chargeback'],
        'Services': ['My Services', 'Workloads'],
        'Compute': {
            'Clouds': ['Instances'],
            'Infrastructure': [
                'Providers',
                'Clusters',
                'Hosts',
                'Virtual Machines',
                'Resource Pools',
                'Datastores',
                'PXE'
            ],
            'Physical Infrastructure': [
                'Providers',
                'Chassis',
                'Racks',
                'Servers',
                'Storages',
                'Switches',
                'Topology'
            ]
        },
        'Configuration': ['Management'],
        'Automation': {
            'Ansible': ['Playbooks', 'Repositories', 'Credentials'],
            'Ansible Tower': ['Explorer']
        }
    },
    'evmgroup-security': {
        'Overview': ['Dashboard', 'Reports', 'Chargeback'],
        'Services': ['My Services', 'Workloads'],
        'Compute': {
            'Clouds': ['Instances'],
            'Infrastructure': [
                'Providers',
                'Clusters',
                'Hosts',
                'Virtual Machines',
                'Resource Pools',
                'Datastores'
            ],
            'Physical Infrastructure': ['Providers', 'Servers']
        },
        'Control': ['Explorer', 'Simulation', 'Log']
    },
    'evmgroup-support': {
        'Overview': ['Dashboard', 'Reports', 'Chargeback'],
        'Services': ['My Services', 'Workloads'],
        'Compute': {
            'Clouds': ['Instances'],
            'Infrastructure': [
                'Providers',
                'Clusters',
                'Hosts',
                'Virtual Machines',
                'Resource Pools',
                'Datastores'
            ],
            'Physical Infrastructure': [
                'Providers',
                'Chassis',
                'Racks',
                'Servers',
                'Storages',
                'Switches',
                'Topology'
            ]
        },
        'Control': ['Explorer', 'Simulation', 'Log']
    },
    'evmgroup-user': {
        'Overview': ['Dashboard', 'Reports', 'Chargeback'],
        'Services': ['My Services', 'Workloads', 'Requests'],
        'Compute': {
            'Clouds': ['Instances'],
            'Infrastructure': [
                'Providers',
                'Clusters',
                'Hosts',
                'Virtual Machines',
                'Resource Pools',
                'Datastores'
            ],
            'Physical Infrastructure': [
                'Providers',
                'Chassis',
                'Racks',
                'Servers',
                'Storages',
                'Switches',
                'Topology'
            ]
        }
    },
    'evmgroup-vm_user': {
        'Services': ['Workloads', 'Requests'],
        'Compute': {
            'Clouds': ['Instances'],
            'Infrastructure': ['Virtual Machines']
        },
        'Configuration': ['Management'],
        'Automation': {
            'Ansible': ['Playbooks', 'Repositories', 'Credentials'],
            'Ansible Tower': ['Explorer']
        }
    }
}
| gpl-2.0 |
hujiajie/chromium-crosswalk | tools/telemetry/telemetry/internal/actions/play.py | 32 | 2187 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""A Telemetry page_action that performs the "play" action on media elements.
Media elements can be specified by a selector argument. If no selector is
defined then then the action attempts to play the first video element or audio
element on the page. A selector can also be 'all' to play all media elements.
Other arguments to use are: playing_event_timeout_in_seconds and
ended_event_timeout_in_seconds, which forces the action to wait until
playing and ended events get fired respectively.
"""
from telemetry.core import exceptions
from telemetry.internal.actions import media_action
from telemetry.internal.actions import page_action
class PlayAction(media_action.MediaAction):
  """Telemetry action that calls play() on media elements chosen by selector.

  See the module docstring for the selector semantics ('' -> first media
  element, 'all' -> every media element).
  """
  def __init__(self, selector=None,
               playing_event_timeout_in_seconds=0,
               ended_event_timeout_in_seconds=0):
    super(PlayAction, self).__init__()
    # An empty selector tells play.js to pick the first video/audio element.
    self._selector = selector if selector else ''
    # Timeouts of 0 mean "do not wait for the corresponding media event".
    self._playing_event_timeout_in_seconds = playing_event_timeout_in_seconds
    self._ended_event_timeout_in_seconds = ended_event_timeout_in_seconds
  def WillRunAction(self, tab):
    """Load the media metrics JS code prior to running the action."""
    super(PlayAction, self).WillRunAction(tab)
    self.LoadJS(tab, 'play.js')
  def RunAction(self, tab):
    """Trigger playback and optionally wait for 'playing'/'ended' events."""
    try:
      tab.ExecuteJavaScript('window.__playMedia("%s");' % self._selector)
      # Check if we need to wait for 'playing' event to fire.
      if self._playing_event_timeout_in_seconds > 0:
        self.WaitForEvent(tab, self._selector, 'playing',
                          self._playing_event_timeout_in_seconds)
      # Check if we need to wait for 'ended' event to fire.
      if self._ended_event_timeout_in_seconds > 0:
        self.WaitForEvent(tab, self._selector, 'ended',
                          self._ended_event_timeout_in_seconds)
    except exceptions.EvaluateException:
      # Surface JS evaluation failures as a page-action failure so the story
      # run reports which selector could not be played.
      raise page_action.PageActionFailed('Cannot play media element(s) with '
                                         'selector = %s.' % self._selector)
| bsd-3-clause |
pixelrebel/st2 | st2api/tests/unit/controllers/v1/test_pack_config_schema.py | 3 | 2064 | # Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from tests import FunctionalTest
__all__ = [
'PackConfigSchemasControllerTestCase'
]
class PackConfigSchemasControllerTestCase(FunctionalTest):
    """Functional tests for the /v1/config_schemas API controller."""
    # Register the fixture packs before the tests run.
    register_packs = True
    def test_get_all(self):
        """GET /v1/config_schemas returns every registered config schema."""
        resp = self.app.get('/v1/config_schemas')
        self.assertEqual(resp.status_int, 200)
        self.assertEqual(len(resp.json), 4, '/v1/config_schemas did not return all schemas.')
    def test_get_one_success(self):
        """GET of an existing pack's schema returns that schema."""
        resp = self.app.get('/v1/config_schemas/dummy_pack_1')
        self.assertEqual(resp.status_int, 200)
        self.assertEqual(resp.json['pack'], 'dummy_pack_1')
        self.assertTrue('api_key' in resp.json['attributes'])
    def test_get_one_doesnt_exist(self):
        """Both a missing schema and a missing pack yield 404, with distinct
        fault messages."""
        # Pack exists, schema doesnt
        resp = self.app.get('/v1/config_schemas/dummy_pack_2',
                            expect_errors=True)
        self.assertEqual(resp.status_int, 404)
        self.assertTrue('Unable to identify resource with pack_ref ' in resp.json['faultstring'])
        # Pack doesn't exist
        resp = self.app.get('/v1/config_schemas/pack_doesnt_exist',
                            expect_errors=True)
        self.assertEqual(resp.status_int, 404)
        self.assertTrue('Unable to find the PackDB instance' in resp.json['faultstring'])
| apache-2.0 |
glaubitz/fs-uae-debian | launcher/OpenGL/raw/GL/EXT/bindable_uniform.py | 9 | 1275 | '''Autogenerated by xml_generate script, do not edit!'''
from OpenGL import platform as _p, arrays
# Code generation uses this
from OpenGL.raw.GL import _types as _cs
# End users want this...
from OpenGL.raw.GL._types import *
from OpenGL.raw.GL import _errors
from OpenGL.constant import Constant as _C
import ctypes
_EXTENSION_NAME = 'GL_EXT_bindable_uniform'
def _f( function ):
    # Bind a ctypes stub below to the GL platform implementation, attaching
    # the standard OpenGL error checker.
    return _p.createFunction( function,_p.PLATFORM.GL,'GL_EXT_bindable_uniform',error_checker=_errors._error_checker)
# Enumerant constants defined by GL_EXT_bindable_uniform.
GL_MAX_BINDABLE_UNIFORM_SIZE_EXT=_C('GL_MAX_BINDABLE_UNIFORM_SIZE_EXT',0x8DED)
GL_MAX_FRAGMENT_BINDABLE_UNIFORMS_EXT=_C('GL_MAX_FRAGMENT_BINDABLE_UNIFORMS_EXT',0x8DE3)
GL_MAX_GEOMETRY_BINDABLE_UNIFORMS_EXT=_C('GL_MAX_GEOMETRY_BINDABLE_UNIFORMS_EXT',0x8DE4)
GL_MAX_VERTEX_BINDABLE_UNIFORMS_EXT=_C('GL_MAX_VERTEX_BINDABLE_UNIFORMS_EXT',0x8DE2)
GL_UNIFORM_BUFFER_BINDING_EXT=_C('GL_UNIFORM_BUFFER_BINDING_EXT',0x8DEF)
GL_UNIFORM_BUFFER_EXT=_C('GL_UNIFORM_BUFFER_EXT',0x8DEE)
# Entry-point stubs: @_p.types declares (return type, arg types); the Python
# bodies are placeholders replaced by the generated ctypes wrappers.
@_f
@_p.types(_cs.GLint,_cs.GLuint,_cs.GLint)
def glGetUniformBufferSizeEXT(program,location):pass
@_f
@_p.types(_cs.GLintptr,_cs.GLuint,_cs.GLint)
def glGetUniformOffsetEXT(program,location):pass
@_f
@_p.types(None,_cs.GLuint,_cs.GLint,_cs.GLuint)
def glUniformBufferEXT(program,location,buffer):pass
| gpl-2.0 |
mpapierski/protobuf | python/google/protobuf/internal/message_test.py | 224 | 22295 | #! /usr/bin/python
#
# Protocol Buffers - Google's data interchange format
# Copyright 2008 Google Inc. All rights reserved.
# http://code.google.com/p/protobuf/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Tests python protocol buffers against the golden message.
Note that the golden messages exercise every known field type, thus this
test ends up exercising and verifying nearly all of the parsing and
serialization code in the whole library.
TODO(kenton): Merge with wire_format_test? It doesn't make a whole lot of
sense to call this a test of the "message" module, which only declares an
abstract interface.
"""
__author__ = 'gps@google.com (Gregory P. Smith)'
import copy
import math
import operator
import pickle
import unittest
from google.protobuf import unittest_import_pb2
from google.protobuf import unittest_pb2
from google.protobuf.internal import api_implementation
from google.protobuf.internal import test_util
from google.protobuf import message
# Python pre-2.6 does not have isinf() or isnan() functions, so we have
# to provide our own.
def isnan(val):
  """Return True iff val is NaN.

  Historically hand-rolled (``val != val``) because Python pre-2.6 lacked
  math.isnan; delegate to the C-implemented stdlib function instead.
  """
  return math.isnan(val)
def isinf(val):
  """Return True iff val is positive or negative infinity.

  Historically computed via the NaN trick (inf * 0 is NaN) for Python
  pre-2.6 compatibility; delegate to math.isinf instead.
  """
  return math.isinf(val)
def IsPosInf(val):
  """Return True iff val is positive infinity."""
  return math.isinf(val) and (val > 0)
def IsNegInf(val):
  """Return True iff val is negative infinity."""
  return math.isinf(val) and (val < 0)
class MessageTest(unittest.TestCase):
  """Round-trip and behavior tests driven by the checked-in golden files.

  Changes from the original: the deprecated ``assertEquals`` alias is
  replaced by ``assertEqual``, and locals that shadowed the imported
  ``message`` module are renamed (``testPickleIncompleteProto`` relies on
  ``message.EncodeError`` from that module).
  """

  def testGoldenMessage(self):
    """Golden wire data round-trips through TestAllTypes unchanged."""
    golden_data = test_util.GoldenFile('golden_message').read()
    golden_message = unittest_pb2.TestAllTypes()
    golden_message.ParseFromString(golden_data)
    test_util.ExpectAllFieldsSet(self, golden_message)
    self.assertEqual(golden_data, golden_message.SerializeToString())
    golden_copy = copy.deepcopy(golden_message)
    self.assertEqual(golden_data, golden_copy.SerializeToString())

  def testGoldenExtensions(self):
    """Golden wire data round-trips through TestAllExtensions unchanged."""
    golden_data = test_util.GoldenFile('golden_message').read()
    golden_message = unittest_pb2.TestAllExtensions()
    golden_message.ParseFromString(golden_data)
    all_set = unittest_pb2.TestAllExtensions()
    test_util.SetAllExtensions(all_set)
    self.assertEqual(all_set, golden_message)
    self.assertEqual(golden_data, golden_message.SerializeToString())
    golden_copy = copy.deepcopy(golden_message)
    self.assertEqual(golden_data, golden_copy.SerializeToString())

  def testGoldenPackedMessage(self):
    """Golden packed wire data round-trips through TestPackedTypes."""
    golden_data = test_util.GoldenFile('golden_packed_fields_message').read()
    golden_message = unittest_pb2.TestPackedTypes()
    golden_message.ParseFromString(golden_data)
    all_set = unittest_pb2.TestPackedTypes()
    test_util.SetAllPackedFields(all_set)
    self.assertEqual(all_set, golden_message)
    self.assertEqual(golden_data, all_set.SerializeToString())
    golden_copy = copy.deepcopy(golden_message)
    self.assertEqual(golden_data, golden_copy.SerializeToString())

  def testGoldenPackedExtensions(self):
    """Golden packed wire data round-trips through TestPackedExtensions."""
    golden_data = test_util.GoldenFile('golden_packed_fields_message').read()
    golden_message = unittest_pb2.TestPackedExtensions()
    golden_message.ParseFromString(golden_data)
    all_set = unittest_pb2.TestPackedExtensions()
    test_util.SetAllPackedExtensions(all_set)
    self.assertEqual(all_set, golden_message)
    self.assertEqual(golden_data, all_set.SerializeToString())
    golden_copy = copy.deepcopy(golden_message)
    self.assertEqual(golden_data, golden_copy.SerializeToString())

  def testPickleSupport(self):
    """A fully-populated message survives a pickle round-trip."""
    golden_data = test_util.GoldenFile('golden_message').read()
    golden_message = unittest_pb2.TestAllTypes()
    golden_message.ParseFromString(golden_data)
    pickled_message = pickle.dumps(golden_message)
    unpickled_message = pickle.loads(pickled_message)
    self.assertEqual(unpickled_message, golden_message)

  def testPickleIncompleteProto(self):
    """Pickling works for a message missing required fields, but
    serialization of the unpickled copy still fails."""
    golden_message = unittest_pb2.TestRequired(a=1)
    pickled_message = pickle.dumps(golden_message)
    unpickled_message = pickle.loads(pickled_message)
    self.assertEqual(unpickled_message, golden_message)
    self.assertEqual(unpickled_message.a, 1)
    # This is still an incomplete proto - so serializing should fail
    self.assertRaises(message.EncodeError, unpickled_message.SerializeToString)

  def testPositiveInfinity(self):
    """+inf floats/doubles parse and re-serialize byte-identically."""
    golden_data = ('\x5D\x00\x00\x80\x7F'
                   '\x61\x00\x00\x00\x00\x00\x00\xF0\x7F'
                   '\xCD\x02\x00\x00\x80\x7F'
                   '\xD1\x02\x00\x00\x00\x00\x00\x00\xF0\x7F')
    golden_message = unittest_pb2.TestAllTypes()
    golden_message.ParseFromString(golden_data)
    self.assertTrue(IsPosInf(golden_message.optional_float))
    self.assertTrue(IsPosInf(golden_message.optional_double))
    self.assertTrue(IsPosInf(golden_message.repeated_float[0]))
    self.assertTrue(IsPosInf(golden_message.repeated_double[0]))
    self.assertEqual(golden_data, golden_message.SerializeToString())

  def testNegativeInfinity(self):
    """-inf floats/doubles parse and re-serialize byte-identically."""
    golden_data = ('\x5D\x00\x00\x80\xFF'
                   '\x61\x00\x00\x00\x00\x00\x00\xF0\xFF'
                   '\xCD\x02\x00\x00\x80\xFF'
                   '\xD1\x02\x00\x00\x00\x00\x00\x00\xF0\xFF')
    golden_message = unittest_pb2.TestAllTypes()
    golden_message.ParseFromString(golden_data)
    self.assertTrue(IsNegInf(golden_message.optional_float))
    self.assertTrue(IsNegInf(golden_message.optional_double))
    self.assertTrue(IsNegInf(golden_message.repeated_float[0]))
    self.assertTrue(IsNegInf(golden_message.repeated_double[0]))
    self.assertEqual(golden_data, golden_message.SerializeToString())

  def testNotANumber(self):
    """NaN floats/doubles parse, and the re-serialized bytes parse back
    into NaN again (exact NaN bit patterns are not pinned)."""
    golden_data = ('\x5D\x00\x00\xC0\x7F'
                   '\x61\x00\x00\x00\x00\x00\x00\xF8\x7F'
                   '\xCD\x02\x00\x00\xC0\x7F'
                   '\xD1\x02\x00\x00\x00\x00\x00\x00\xF8\x7F')
    golden_message = unittest_pb2.TestAllTypes()
    golden_message.ParseFromString(golden_data)
    self.assertTrue(isnan(golden_message.optional_float))
    self.assertTrue(isnan(golden_message.optional_double))
    self.assertTrue(isnan(golden_message.repeated_float[0]))
    self.assertTrue(isnan(golden_message.repeated_double[0]))

    # The protocol buffer may serialize to any one of multiple different
    # representations of a NaN.  Rather than verify a specific representation,
    # verify the serialized string can be converted into a correctly
    # behaving protocol buffer.
    # (Local renamed from 'message' to avoid shadowing the imported module.)
    serialized = golden_message.SerializeToString()
    reparsed = unittest_pb2.TestAllTypes()
    reparsed.ParseFromString(serialized)
    self.assertTrue(isnan(reparsed.optional_float))
    self.assertTrue(isnan(reparsed.optional_double))
    self.assertTrue(isnan(reparsed.repeated_float[0]))
    self.assertTrue(isnan(reparsed.repeated_double[0]))

  def testPositiveInfinityPacked(self):
    """+inf values in packed repeated fields round-trip byte-identically."""
    golden_data = ('\xA2\x06\x04\x00\x00\x80\x7F'
                   '\xAA\x06\x08\x00\x00\x00\x00\x00\x00\xF0\x7F')
    golden_message = unittest_pb2.TestPackedTypes()
    golden_message.ParseFromString(golden_data)
    self.assertTrue(IsPosInf(golden_message.packed_float[0]))
    self.assertTrue(IsPosInf(golden_message.packed_double[0]))
    self.assertEqual(golden_data, golden_message.SerializeToString())

  def testNegativeInfinityPacked(self):
    """-inf values in packed repeated fields round-trip byte-identically."""
    golden_data = ('\xA2\x06\x04\x00\x00\x80\xFF'
                   '\xAA\x06\x08\x00\x00\x00\x00\x00\x00\xF0\xFF')
    golden_message = unittest_pb2.TestPackedTypes()
    golden_message.ParseFromString(golden_data)
    self.assertTrue(IsNegInf(golden_message.packed_float[0]))
    self.assertTrue(IsNegInf(golden_message.packed_double[0]))
    self.assertEqual(golden_data, golden_message.SerializeToString())

  def testNotANumberPacked(self):
    """NaN values in packed repeated fields survive a serialize/parse
    round-trip (exact NaN bit patterns are not pinned)."""
    golden_data = ('\xA2\x06\x04\x00\x00\xC0\x7F'
                   '\xAA\x06\x08\x00\x00\x00\x00\x00\x00\xF8\x7F')
    golden_message = unittest_pb2.TestPackedTypes()
    golden_message.ParseFromString(golden_data)
    self.assertTrue(isnan(golden_message.packed_float[0]))
    self.assertTrue(isnan(golden_message.packed_double[0]))

    # (Local renamed from 'message' to avoid shadowing the imported module.)
    serialized = golden_message.SerializeToString()
    reparsed = unittest_pb2.TestPackedTypes()
    reparsed.ParseFromString(serialized)
    self.assertTrue(isnan(reparsed.packed_float[0]))
    self.assertTrue(isnan(reparsed.packed_double[0]))

  def testExtremeFloatValues(self):
    """Boundary-exponent float values survive a serialize/parse round-trip."""
    message = unittest_pb2.TestAllTypes()

    # Most positive exponent, no significand bits set.
    kMostPosExponentNoSigBits = math.pow(2, 127)
    message.optional_float = kMostPosExponentNoSigBits
    message.ParseFromString(message.SerializeToString())
    self.assertTrue(message.optional_float == kMostPosExponentNoSigBits)

    # Most positive exponent, one significand bit set.
    kMostPosExponentOneSigBit = 1.5 * math.pow(2, 127)
    message.optional_float = kMostPosExponentOneSigBit
    message.ParseFromString(message.SerializeToString())
    self.assertTrue(message.optional_float == kMostPosExponentOneSigBit)

    # Repeat last two cases with values of same magnitude, but negative.
    message.optional_float = -kMostPosExponentNoSigBits
    message.ParseFromString(message.SerializeToString())
    self.assertTrue(message.optional_float == -kMostPosExponentNoSigBits)

    message.optional_float = -kMostPosExponentOneSigBit
    message.ParseFromString(message.SerializeToString())
    self.assertTrue(message.optional_float == -kMostPosExponentOneSigBit)

    # Most negative exponent, no significand bits set.
    kMostNegExponentNoSigBits = math.pow(2, -127)
    message.optional_float = kMostNegExponentNoSigBits
    message.ParseFromString(message.SerializeToString())
    self.assertTrue(message.optional_float == kMostNegExponentNoSigBits)

    # Most negative exponent, one significand bit set.
    kMostNegExponentOneSigBit = 1.5 * math.pow(2, -127)
    message.optional_float = kMostNegExponentOneSigBit
    message.ParseFromString(message.SerializeToString())
    self.assertTrue(message.optional_float == kMostNegExponentOneSigBit)

    # Repeat last two cases with values of the same magnitude, but negative.
    message.optional_float = -kMostNegExponentNoSigBits
    message.ParseFromString(message.SerializeToString())
    self.assertTrue(message.optional_float == -kMostNegExponentNoSigBits)

    message.optional_float = -kMostNegExponentOneSigBit
    message.ParseFromString(message.SerializeToString())
    self.assertTrue(message.optional_float == -kMostNegExponentOneSigBit)

  def testExtremeDoubleValues(self):
    """Boundary-exponent double values survive a serialize/parse round-trip."""
    message = unittest_pb2.TestAllTypes()

    # Most positive exponent, no significand bits set.
    kMostPosExponentNoSigBits = math.pow(2, 1023)
    message.optional_double = kMostPosExponentNoSigBits
    message.ParseFromString(message.SerializeToString())
    self.assertTrue(message.optional_double == kMostPosExponentNoSigBits)

    # Most positive exponent, one significand bit set.
    kMostPosExponentOneSigBit = 1.5 * math.pow(2, 1023)
    message.optional_double = kMostPosExponentOneSigBit
    message.ParseFromString(message.SerializeToString())
    self.assertTrue(message.optional_double == kMostPosExponentOneSigBit)

    # Repeat last two cases with values of same magnitude, but negative.
    message.optional_double = -kMostPosExponentNoSigBits
    message.ParseFromString(message.SerializeToString())
    self.assertTrue(message.optional_double == -kMostPosExponentNoSigBits)

    message.optional_double = -kMostPosExponentOneSigBit
    message.ParseFromString(message.SerializeToString())
    self.assertTrue(message.optional_double == -kMostPosExponentOneSigBit)

    # Most negative exponent, no significand bits set.
    kMostNegExponentNoSigBits = math.pow(2, -1023)
    message.optional_double = kMostNegExponentNoSigBits
    message.ParseFromString(message.SerializeToString())
    self.assertTrue(message.optional_double == kMostNegExponentNoSigBits)

    # Most negative exponent, one significand bit set.
    kMostNegExponentOneSigBit = 1.5 * math.pow(2, -1023)
    message.optional_double = kMostNegExponentOneSigBit
    message.ParseFromString(message.SerializeToString())
    self.assertTrue(message.optional_double == kMostNegExponentOneSigBit)

    # Repeat last two cases with values of the same magnitude, but negative.
    message.optional_double = -kMostNegExponentNoSigBits
    message.ParseFromString(message.SerializeToString())
    self.assertTrue(message.optional_double == -kMostNegExponentNoSigBits)

    message.optional_double = -kMostNegExponentOneSigBit
    message.ParseFromString(message.SerializeToString())
    self.assertTrue(message.optional_double == -kMostNegExponentOneSigBit)

  def testSortingRepeatedScalarFieldsDefaultComparator(self):
    """Check some different types with the default comparator."""
    message = unittest_pb2.TestAllTypes()

    # TODO(mattp): would testing more scalar types strengthen test?
    message.repeated_int32.append(1)
    message.repeated_int32.append(3)
    message.repeated_int32.append(2)
    message.repeated_int32.sort()
    self.assertEqual(message.repeated_int32[0], 1)
    self.assertEqual(message.repeated_int32[1], 2)
    self.assertEqual(message.repeated_int32[2], 3)

    message.repeated_float.append(1.1)
    message.repeated_float.append(1.3)
    message.repeated_float.append(1.2)
    message.repeated_float.sort()
    self.assertAlmostEqual(message.repeated_float[0], 1.1)
    self.assertAlmostEqual(message.repeated_float[1], 1.2)
    self.assertAlmostEqual(message.repeated_float[2], 1.3)

    message.repeated_string.append('a')
    message.repeated_string.append('c')
    message.repeated_string.append('b')
    message.repeated_string.sort()
    self.assertEqual(message.repeated_string[0], 'a')
    self.assertEqual(message.repeated_string[1], 'b')
    self.assertEqual(message.repeated_string[2], 'c')

    message.repeated_bytes.append('a')
    message.repeated_bytes.append('c')
    message.repeated_bytes.append('b')
    message.repeated_bytes.sort()
    self.assertEqual(message.repeated_bytes[0], 'a')
    self.assertEqual(message.repeated_bytes[1], 'b')
    self.assertEqual(message.repeated_bytes[2], 'c')

  def testSortingRepeatedScalarFieldsCustomComparator(self):
    """Check some different types with custom comparator."""
    message = unittest_pb2.TestAllTypes()

    message.repeated_int32.append(-3)
    message.repeated_int32.append(-2)
    message.repeated_int32.append(-1)
    message.repeated_int32.sort(lambda x,y: cmp(abs(x), abs(y)))
    self.assertEqual(message.repeated_int32[0], -1)
    self.assertEqual(message.repeated_int32[1], -2)
    self.assertEqual(message.repeated_int32[2], -3)

    message.repeated_string.append('aaa')
    message.repeated_string.append('bb')
    message.repeated_string.append('c')
    message.repeated_string.sort(lambda x,y: cmp(len(x), len(y)))
    self.assertEqual(message.repeated_string[0], 'c')
    self.assertEqual(message.repeated_string[1], 'bb')
    self.assertEqual(message.repeated_string[2], 'aaa')

  def testSortingRepeatedCompositeFieldsCustomComparator(self):
    """Check passing a custom comparator to sort a repeated composite field."""
    message = unittest_pb2.TestAllTypes()

    message.repeated_nested_message.add().bb = 1
    message.repeated_nested_message.add().bb = 3
    message.repeated_nested_message.add().bb = 2
    message.repeated_nested_message.add().bb = 6
    message.repeated_nested_message.add().bb = 5
    message.repeated_nested_message.add().bb = 4
    message.repeated_nested_message.sort(lambda x,y: cmp(x.bb, y.bb))
    self.assertEqual(message.repeated_nested_message[0].bb, 1)
    self.assertEqual(message.repeated_nested_message[1].bb, 2)
    self.assertEqual(message.repeated_nested_message[2].bb, 3)
    self.assertEqual(message.repeated_nested_message[3].bb, 4)
    self.assertEqual(message.repeated_nested_message[4].bb, 5)
    self.assertEqual(message.repeated_nested_message[5].bb, 6)

  def testRepeatedCompositeFieldSortArguments(self):
    """Check sorting a repeated composite field using list.sort() arguments."""
    message = unittest_pb2.TestAllTypes()

    get_bb = operator.attrgetter('bb')
    cmp_bb = lambda a, b: cmp(a.bb, b.bb)
    message.repeated_nested_message.add().bb = 1
    message.repeated_nested_message.add().bb = 3
    message.repeated_nested_message.add().bb = 2
    message.repeated_nested_message.add().bb = 6
    message.repeated_nested_message.add().bb = 5
    message.repeated_nested_message.add().bb = 4
    message.repeated_nested_message.sort(key=get_bb)
    self.assertEqual([k.bb for k in message.repeated_nested_message],
                     [1, 2, 3, 4, 5, 6])
    message.repeated_nested_message.sort(key=get_bb, reverse=True)
    self.assertEqual([k.bb for k in message.repeated_nested_message],
                     [6, 5, 4, 3, 2, 1])
    message.repeated_nested_message.sort(sort_function=cmp_bb)
    self.assertEqual([k.bb for k in message.repeated_nested_message],
                     [1, 2, 3, 4, 5, 6])
    message.repeated_nested_message.sort(cmp=cmp_bb, reverse=True)
    self.assertEqual([k.bb for k in message.repeated_nested_message],
                     [6, 5, 4, 3, 2, 1])

  def testRepeatedScalarFieldSortArguments(self):
    """Check sorting a scalar field using list.sort() arguments."""
    message = unittest_pb2.TestAllTypes()

    abs_cmp = lambda a, b: cmp(abs(a), abs(b))
    message.repeated_int32.append(-3)
    message.repeated_int32.append(-2)
    message.repeated_int32.append(-1)
    message.repeated_int32.sort(key=abs)
    self.assertEqual(list(message.repeated_int32), [-1, -2, -3])
    message.repeated_int32.sort(key=abs, reverse=True)
    self.assertEqual(list(message.repeated_int32), [-3, -2, -1])
    message.repeated_int32.sort(sort_function=abs_cmp)
    self.assertEqual(list(message.repeated_int32), [-1, -2, -3])
    message.repeated_int32.sort(cmp=abs_cmp, reverse=True)
    self.assertEqual(list(message.repeated_int32), [-3, -2, -1])

    len_cmp = lambda a, b: cmp(len(a), len(b))
    message.repeated_string.append('aaa')
    message.repeated_string.append('bb')
    message.repeated_string.append('c')
    message.repeated_string.sort(key=len)
    self.assertEqual(list(message.repeated_string), ['c', 'bb', 'aaa'])
    message.repeated_string.sort(key=len, reverse=True)
    self.assertEqual(list(message.repeated_string), ['aaa', 'bb', 'c'])
    message.repeated_string.sort(sort_function=len_cmp)
    self.assertEqual(list(message.repeated_string), ['c', 'bb', 'aaa'])
    message.repeated_string.sort(cmp=len_cmp, reverse=True)
    self.assertEqual(list(message.repeated_string), ['aaa', 'bb', 'c'])

  def testParsingMerge(self):
    """Check the merge behavior when a required or optional field appears
    multiple times in the input."""
    messages = [
        unittest_pb2.TestAllTypes(),
        unittest_pb2.TestAllTypes(),
        unittest_pb2.TestAllTypes() ]
    messages[0].optional_int32 = 1
    messages[1].optional_int64 = 2
    messages[2].optional_int32 = 3
    messages[2].optional_string = 'hello'

    merged_message = unittest_pb2.TestAllTypes()
    merged_message.optional_int32 = 3
    merged_message.optional_int64 = 2
    merged_message.optional_string = 'hello'

    generator = unittest_pb2.TestParsingMerge.RepeatedFieldsGenerator()
    generator.field1.extend(messages)
    generator.field2.extend(messages)
    generator.field3.extend(messages)
    generator.ext1.extend(messages)
    generator.ext2.extend(messages)
    generator.group1.add().field1.MergeFrom(messages[0])
    generator.group1.add().field1.MergeFrom(messages[1])
    generator.group1.add().field1.MergeFrom(messages[2])
    generator.group2.add().field1.MergeFrom(messages[0])
    generator.group2.add().field1.MergeFrom(messages[1])
    generator.group2.add().field1.MergeFrom(messages[2])

    data = generator.SerializeToString()
    parsing_merge = unittest_pb2.TestParsingMerge()
    parsing_merge.ParseFromString(data)

    # Required and optional fields should be merged.
    self.assertEqual(parsing_merge.required_all_types, merged_message)
    self.assertEqual(parsing_merge.optional_all_types, merged_message)
    self.assertEqual(parsing_merge.optionalgroup.optional_group_all_types,
                     merged_message)
    self.assertEqual(parsing_merge.Extensions[
                     unittest_pb2.TestParsingMerge.optional_ext],
                     merged_message)

    # Repeated fields should not be merged.
    self.assertEqual(len(parsing_merge.repeated_all_types), 3)
    self.assertEqual(len(parsing_merge.repeatedgroup), 3)
    self.assertEqual(len(parsing_merge.Extensions[
                     unittest_pb2.TestParsingMerge.repeated_ext]), 3)

  def testSortEmptyRepeatedCompositeContainer(self):
    """Exercise a scenario that has led to segfaults in the past.
    """
    m = unittest_pb2.TestAllTypes()
    m.repeated_nested_message.sort()
# Allow running this module directly as a standalone test script.
if __name__ == '__main__':
  unittest.main()
| bsd-3-clause |
gannetson/django | tests/template_tests/utils.py | 214 | 4372 | # coding: utf-8
from __future__ import unicode_literals
import functools
import os
from django.template.engine import Engine
from django.test.utils import override_settings
from django.utils._os import upath
from django.utils.encoding import python_2_unicode_compatible
from django.utils.safestring import mark_safe
# Absolute path of this test package and of its bundled template directory.
ROOT = os.path.dirname(os.path.abspath(upath(__file__)))
TEMPLATE_DIR = os.path.join(ROOT, 'templates')
def setup(templates, *args, **kwargs):
    """
    Decorator factory: runs the decorated test method multiple times, once
    per engine configuration, in the following order:

    debug       cached      string_if_invalid
    -----       ------      -----------------
    False       False
    False       True
    False       False       INVALID
    False       True        INVALID
    True        False
    True        True

    ``templates`` maps template names to template source; extra dicts passed
    as positional args are merged into it.
    """
    # when testing deprecation warnings, it's useful to run just one test since
    # the message won't be displayed multiple times
    test_once = kwargs.get('test_once', False)

    for arg in args:
        templates.update(arg)

    # numerous tests make use of an inclusion tag
    # add this in here for simplicity
    templates["inclusion.html"] = "{{ result }}"

    loaders = [
        ('django.template.loaders.cached.Loader', [
            ('django.template.loaders.locmem.Loader', templates),
        ]),
    ]

    def decorator(func):
        # Make Engine.get_default() raise an exception to ensure that tests
        # are properly isolated from Django's global settings.
        @override_settings(TEMPLATES=None)
        @functools.wraps(func)
        def inner(self):
            # Set up custom template tag libraries if specified
            libraries = getattr(self, 'libraries', {})

            # Pass 1: default string_if_invalid, no debug.
            self.engine = Engine(
                allowed_include_roots=[ROOT],
                libraries=libraries,
                loaders=loaders,
            )
            func(self)
            if test_once:
                return
            # The second call re-runs the test against the now-warm cached
            # template loader (the "cached True" rows of the table above).
            func(self)
            # Pass 2: same engine, but invalid variables render as 'INVALID'.
            self.engine = Engine(
                allowed_include_roots=[ROOT],
                libraries=libraries,
                loaders=loaders,
                string_if_invalid='INVALID',
            )
            func(self)
            func(self)
            # Pass 3: template debug mode enabled.
            self.engine = Engine(
                allowed_include_roots=[ROOT],
                debug=True,
                libraries=libraries,
                loaders=loaders,
            )
            func(self)
            func(self)
        return inner
    return decorator
# Helper objects
class SomeException(Exception):
    """Exception the template engine silences during variable resolution."""
    silent_variable_failure = True
class SomeOtherException(Exception):
    """Exception the template engine must propagate (not silenced)."""
    pass
class ShouldNotExecuteException(Exception):
    """Raised by code paths that a correct template engine never reaches."""
    pass
class SomeClass:
    """Test double whose methods, items and properties either succeed or
    raise, to exercise the template engine's failure handling."""

    def __init__(self):
        self.otherclass = OtherClass()

    def method(self):
        return 'SomeClass.method'

    def method2(self, o):
        # Echoes its argument back to the caller.
        return o

    def method3(self):
        # Raises an exception flagged silent_variable_failure.
        raise SomeException

    def method4(self):
        # Raises an exception the engine must not silence.
        raise SomeOtherException

    def method5(self):
        raise TypeError

    def __getitem__(self, key):
        # Subscript access mirrors the method behaviour above: one key fails
        # silently, one fails loudly, all others are simply missing.
        if key == 'silent_fail_key':
            raise SomeException
        elif key == 'noisy_fail_key':
            raise SomeOtherException
        raise KeyError

    @property
    def silent_fail_attribute(self):
        raise SomeException

    @property
    def noisy_fail_attribute(self):
        raise SomeOtherException

    @property
    def attribute_error_attribute(self):
        raise AttributeError
class OtherClass:
    """Nested helper reachable via SomeClass().otherclass in templates."""

    def method(self):
        return 'OtherClass.method'
class TestObj(object):
    """Object with boolean-returning methods for testing {% if %} logic."""

    def is_true(self):
        return True

    def is_false(self):
        return False

    def is_bad(self):
        # Must never be called by a correctly short-circuiting template.
        raise ShouldNotExecuteException()
class SilentGetItemClass(object):
    """Object whose subscript access always raises a silenced exception."""

    def __getitem__(self, key):
        raise SomeException
class SilentAttrClass(object):
    """Object whose attribute ``b`` always raises a silenced exception.

    Uses the ``@property`` decorator instead of the dated
    ``b = property(b)`` re-binding form; behavior is identical.
    """

    @property
    def b(self):
        raise SomeException
@python_2_unicode_compatible
class UTF8Class:
    "Class whose __str__ returns non-ASCII data on Python 2"
    def __str__(self):
        return 'ŠĐĆŽćžšđ'
# These two classes are used to test auto-escaping of unicode output.
@python_2_unicode_compatible
class UnsafeClass:
    """__str__ output contains '&', so it must be HTML-escaped on render."""
    def __str__(self):
        return 'you & me'
@python_2_unicode_compatible
class SafeClass:
    """__str__ output is mark_safe'd, so it must NOT be escaped on render."""
    def __str__(self):
        return mark_safe('you > me')
| bsd-3-clause |
rubikloud/gpdb | src/test/unit/mock/mocker.py | 13 | 10666 | #!/usr/bin/env python
import logging
import optparse
import os
import re
import subprocess
import sys
import special
class CFile(object):
    """Parsed view of a C source file, able to rewrite its function bodies
    as cmockery mocks while preserving everything else verbatim."""

    # multi-line comment
    m_comment_pat = re.compile(r'/\*.*?\*/', re.DOTALL)
    # single-line comment (avoid http:// or postgres://)
    s_comment_pat = re.compile(r'(?<!:)//.*$', re.MULTILINE)
    # __attribute__((XXX)): it gets difficult to match arguments.
    # Remove it as it's a noisy keyword for us.
    attribute_pat = re.compile(r'__attribute__\s*\(\((format\s*\([^\)]+\)\s*|format_arg\s*\(\d+\)\s*|.+?)\)\)')
    # function pattern
    func_pat = re.compile(
        # modifier
        r'(?:(static|inline|__inline__|__inline)\s+)*' +
        # rettype
        r'((?:const\s+)?(?:struct\s+|unsigned\s+)?\w+(?:[\s\*]+|\s+))(?:inline\s+|static\s+)?' +
        # funcname
        r'(\w+)\s*'
        # arguments
        r'\(([^{}\)]*?)\)\s*{', re.DOTALL)
    # static variable pattern
    # Currently this requires static keyword at the beginning of line.
    ###staticvar_pat = re.compile(r'^static.+?;', re.MULTILINE | re.DOTALL)

    def __init__(self, path, options):
        # path: C source to parse; options: parsed command-line options
        # (src_dir / out_dir).  Comments are stripped up front so the
        # regexes above only ever see real code.
        self.path = os.path.abspath(path)
        self.options = options
        #with open(self.make_i()) as f:
        with open(self.path) as f:
            self.content = self.strip(f.read())

    def make_i(self):
        """create .i file from .c by using preprocessor with existing make
        system.  The CPPFLAGS may be different from time/env to time/env.
        make will be the best way to preprocess it so far.  Note we need
        not only header file directory but also some definitions.  For
        example some debug symbols may not be found in the existing object
        files if we didn't pass debug #define.

        XXX: Currently we don't need this, but leave it now for future use.
        """
        i_path = '{stem}.i'.format(stem=os.path.splitext(self.path)[0])
        subprocess.check_call(['make', '--quiet', '-C', self.options.src_dir, i_path])
        return i_path

    def strip(self, content):
        """strip comments in the content
        """
        content = CFile.m_comment_pat.sub('', content)
        # backend/libpq/be-secure.c contains private key with '//'
        if 'be-secure' not in self.path and 'guc.c' not in self.path:
            content = CFile.s_comment_pat.sub('', content)
        content = CFile.attribute_pat.sub('', content)
        return content

    def skip_func_body(self, content, index):
        """Skip function body by finding a line starting with a closing brace.
        We wanted to count the number of open/close braces, but some file has
        weird code block based on preprocessor directives.

        Returns the index just past the function body's closing brace.
        """
        pat = re.compile(r'^}\s*$', re.MULTILINE)
        if 'cdbfilerepconnserver' in self.path:
            # FIXIT!: some of the files have unpleasant format.
            pat = re.compile(r'^ ?}', re.MULTILINE)
        m = pat.search(content, index)
        if m:
            if 'cdbgroup' in self.path:
                # Special case: a body whose closing brace is immediately
                # followed by #endif has not actually ended yet.
                if content[m.end()+1:].startswith('#endif'):
                    return self.skip_func_body(content, m.end())
            return m.end()
        raise StandardError('unexpected syntax')

    def to_mock(self):
        """Mock up this file.  The basic idea is to replace function body
        with mocked up source.  Other parts are preserved.  Otherwise,
        the source code messed up because of preprocessor directives.
        """
        content = self.content
        prev = 0
        result = ''
        # Walk the matched function definitions in order, emitting the
        # untouched text between them and a mock body for each one.
        for (func, m) in self.match_functions():
            spos = m.start()
            epos = m.end()
            result += content[prev:spos]
            result += func.to_mock()
            prev = self.skip_func_body(content, epos)
        result += content[prev:]
        return result

    def match_functions(self):
        """Iterator of function pattern matching.

        Yields (FuncSignature, re.Match) pairs; control-flow keywords that
        look like function definitions are filtered out.
        """
        content = self.content
        for m in CFile.func_pat.finditer(content):
            (modifier, rettype, funcname, args) = m.groups('')
            # 'else if(...){}' looks like a function.  Ignore it.
            if funcname in ['if', 'while', 'switch', 'for', 'foreach',
                            'yysyntax_error', 'defined']:
                continue
            if rettype.strip() in ['define', 'select']:
                continue
            func = FuncSignature(modifier, rettype, funcname, args)
            yield (func, m)
class MockFile(object):
    """Writes the mocked version of a CFile to the mock output tree."""

    def __init__(self, cfile, options):
        self.cfile = cfile
        self.options = options
        self.outname = self.output_filename()

    def output_filename(self):
        """outname is cdb-pg/src/test/unit/mock/backend/{path}/{stem}_mock.c
        """
        src_dir = self.options.src_dir
        relpath = os.path.relpath(self.cfile.path, src_dir)
        out_dir = self.options.out_dir
        out_dir = os.path.join(out_dir, os.path.dirname(relpath))
        (stem, ext) = os.path.splitext(os.path.basename(relpath))
        if not os.path.exists(out_dir):
            # Another parallel mocker process may create the directory
            # between the check and the call; ignore that race.
            try:
                os.makedirs(out_dir)
            except OSError:
                pass
        return os.path.join(out_dir, '{stem}_mock.c'.format(stem=stem))

    def mock(self):
        """Write the mock source: a fixed cmockery prelude followed by the
        original file with every function body replaced by a mock."""
        outname = self.outname
        with open(outname, 'w') as f:
            f.write("""/*
 *
 * Auto-generated Mocking Source
 *
 */
#include <stdarg.h>
#include <stddef.h>
#include <setjmp.h>
#include "cmockery.h"

""")
            f.write(self.cfile.to_mock())
        return
class FuncSignature(object):
    """Parsed C function signature, able to emit a cmockery mock body.

    Bug fix: make_body()'s optional_assignment() loop previously reused the
    ``ref``/``argtype``/``argname`` values left over from the preceding
    check_expected() loop, so pointer arguments that were not the last
    argument got an optional_assignment() call for the wrong name.  They are
    now recomputed per argument.  The debug ``print`` in parse_args() was
    also changed to call syntax (valid in both Python 2 and 3).
    """

    # This pattern needs to be fixed; if the argname is not present,
    # we need extra space at the end.
    arg_pat = re.compile(
        # argtype. i.e. 'const unsigned long', 'struct Foo *', 'const char * const'
        r'((?:register\s+|const\s+|volatile\s+)*(?:enum\s+|struct\s+|unsigned\s+|long\s+)?' +
        r'\w+(?:[\s\*]+)(?:const[\s\*]+)?|\s+)' +
        r'(?:__restrict\s+)?' +
        # argname. We accept 'arg[]'
        r'([\w\[\]]+)?')

    # Sentinel stored in self.args for a '...' (variadic) parameter.
    Variadic = object()

    def __init__(self, modifier, rettype, funcname, args):
        self.modifier = modifier.strip()
        # 'inline' is part of the modifier, not the return type.
        self.rettype = re.sub('inline', '', rettype).strip()
        self.funcname = funcname.strip()
        self.args = self.parse_args(args)

    def is_local(self):
        """Am I a local (static/inline) function?"""
        return bool(self.modifier)

    def is_pointer_type(self, argtype):
        """Is the type pointer?"""
        return argtype[-1] == '*'

    def is_variadic(self, arg):
        """Is this parsed argument the '...' sentinel?"""
        return arg == FuncSignature.Variadic

    def parse_args(self, arg_string):
        """Parse a C parameter list into a list of (type, name) tuples.

        '...' becomes the Variadic sentinel; PG_FUNCTION_ARGS and
        SIGNAL_ARGS macros are expanded to their conventional parameters.
        Unnamed parameters get synthetic names argN.
        """
        args = []
        arg_string = re.sub(r'\s+', ' ', arg_string)
        if arg_string == 'void' or arg_string == '':
            return args
        for (i, arg) in enumerate(arg_string.split(',')):
            arg = arg.strip()
            # TODO: needs work
            if arg == '...':
                args.append(FuncSignature.Variadic)
                continue
            elif arg == 'PG_FUNCTION_ARGS':
                args.append(('FunctionCallInfo', 'fcinfo'))
                continue
            elif arg == 'SIGNAL_ARGS':
                args.append(('int', 'signal_args'))
                continue
            # general case
            m = FuncSignature.arg_pat.match(arg.strip())
            if not m:
                # Diagnostic before the m.group() call below fails loudly.
                print('%s %s(%s)' % (self.rettype, self.funcname, arg_string))
            argtype = m.group(1)
            argname = m.group(2) if m.group(2) else 'arg' + str(i)
            args.append((argtype.strip(), argname.strip()))
        return args

    def format_args(self):
        """Render self.args back into a C parameter list string."""
        buf = []
        for arg in self.args:
            if self.is_variadic(arg):
                buf.append('...')
                continue
            argtype = arg[0]
            argname = arg[1]
            buf.append(argtype + ' ' + argname)
        if not buf:
            buf = ['void']
        return ', '.join(buf)

    def make_body(self):
        """Build the mock function body.

        Emits check_expected() for every argument, optional_assignment()
        for pointer arguments, then the mock() return stub; functions with
        a special-cased body in the `special` module use that instead.
        """
        body = special.SpecialFuncs.make_body(self)
        if body:
            return body

        # Strips a trailing array subscript ('arg[]' -> 'arg').
        subscript = re.compile('\[\d*\]$')
        # otherwise, general method
        buf = []
        # emit check_expected()
        for arg in self.args:
            if self.is_variadic(arg):
                continue
            argtype, argname = arg
            ref = '&' if special.ByValStructs.has(argtype) else ''
            argname = subscript.sub('', argname)
            buf.append('\tcheck_expected({ref}{arg});'.format(ref=ref, arg=argname))

        # if the type is pointer, call optional_assignment()
        for arg in self.args:
            if self.is_variadic(arg):
                continue
            argtype, argname = arg
            if not self.is_pointer_type(argtype):
                continue
            # BUG FIX: recompute ref/argname for *this* argument instead of
            # reusing the values left over from the loop above.
            argname = subscript.sub('', argname)
            ref = '&' if special.ByValStructs.has(argtype) else ''
            buf.append('\toptional_assignment({ref}{arg});'.format(ref=ref, arg=argname))

        # Currently, local function doesn't check arguments.
        if self.is_local():
            buf = []

        # Return stub: by-value structs come back via a pointer from mock().
        if special.ByValStructs.has(self.rettype):
            ret = ('\t{rettype} *ret = ({rettype} *) mock();\n' +
                   '\treturn *ret;').format(rettype=self.rettype)
        elif self.rettype != 'void':
            ret = '\treturn ({cast}) mock();'.format(cast=self.rettype)
        else:
            ret = '\tmock();'
        buf.append(ret)

        return '\n'.join(buf)

    def to_mock(self):
        """Render the full mock function definition as C source."""
        mod_ret = self.rettype
        if self.modifier:
            mod_ret = self.modifier + ' ' + mod_ret
        return """
{mod_ret}
{name}({args})
{{
{body}
}}
""".format(mod_ret=mod_ret, name=self.funcname, args=self.format_args(),
           body=self.make_body())
def main():
    """Command-line entry point: generate a mock for one C source file.

    Expects one positional argument (the C file to parse); exits with an
    optparse usage error when it is missing.
    """
    logging.basicConfig(level=logging.INFO)
    # Default output/source directories are resolved relative to this script.
    mydir = os.path.dirname(os.path.realpath(__file__))
    parser = optparse.OptionParser()
    parser.add_option('--out-dir',
                      dest='out_dir',
                      default=os.path.join(mydir, '.'))
    parser.add_option('--src-dir',
                      dest='src_dir',
                      default=os.path.join(mydir, '../../..'))
    (options, args) = parser.parse_args()
    if len(args) < 1:
        parser.error('insufficient arguments')
    # Option handling is done outside the try so the error message below can
    # never reference an unbound `args` (the original wrapped everything).
    try:
        cfile = CFile(args[0], options)
        mock = MockFile(cfile, options)
        mock.mock()
    except Exception as e:
        # Lazy %-args: the message is formatted only if the record is emitted.
        logging.error('Error has occurred during parsing %s: %s', args[0], str(e))
        raise


if __name__ == '__main__':
    main()
| apache-2.0 |
vadimtk/chrome4sdp | tools/perf/page_sets/tough_canvas_cases.py | 13 | 4031 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from telemetry.page import page as page_module
from telemetry import story
class ToughCanvasCasesPage(page_module.Page):
    """One tough-canvas story: navigate, wait for full load, then let the
    self-driven canvas animation run while it is measured."""

    def __init__(self, url, page_set):
        super(ToughCanvasCasesPage, self).__init__(url=url, page_set=page_set)
        self.archive_data_file = 'data/tough_canvas_cases.json'

    def RunNavigateSteps(self, action_runner):
        # Wait until the document has finished loading before measuring.
        super(ToughCanvasCasesPage, self).RunNavigateSteps(action_runner)
        action_runner.WaitForJavaScriptCondition(
            "document.readyState == 'complete'")

    def RunPageInteractions(self, action_runner):
        # The pages animate on their own; simply observe for five seconds.
        with action_runner.CreateInteraction('CanvasAnimation'):
            action_runner.Wait(5)
class MicrosofFirefliesPage(ToughCanvasCasesPage):
    """Microsoft "Fireflies" demo page; kept out of the page set because it
    crashes on some devices (see ToughCanvasCasesPageSet).

    NOTE(review): the class name is missing the final 't' of "Microsoft";
    left unchanged because the name is referenced elsewhere.
    """

    def __init__(self, page_set):
        super(MicrosofFirefliesPage, self).__init__(
            # pylint: disable=C0301
            url='http://ie.microsoft.com/testdrive/Performance/Fireflies/Default.html',
            page_set=page_set)
class ToughCanvasCasesPageSet(story.StorySet):
    """
    Description: Self-driven Canvas2D animation examples
    """

    def __init__(self):
        super(ToughCanvasCasesPageSet, self).__init__(
            archive_data_file='data/tough_canvas_cases.json',
            cloud_storage_bucket=story.PARTNER_BUCKET)

        # Crashes on Galaxy Nexus. crbug.com/314131
        # self.AddStory(MicrosofFirefliesPage(self))

        # Failing on Nexus 5 (http://crbug.com/364248):
        # 'http://geoapis.appspot.com/agdnZW9hcGlzchMLEgtFeGFtcGxlQ29kZRjh1wIM',

        # Every URL below hosts a self-running canvas animation; the common
        # ToughCanvasCasesPage just loads it and watches for five seconds.
        urls_list = [
            'http://mudcu.be/labs/JS1k/BreathingGalaxies.html',
            'http://runway.countlessprojects.com/prototype/performance_test.html',
            # pylint: disable=C0301
            'http://ie.microsoft.com/testdrive/Performance/FishIETank/Default.html',
            'http://ie.microsoft.com/testdrive/Performance/SpeedReading/Default.html',
            'http://acko.net/dumpx/996b.html',
            'http://www.kevs3d.co.uk/dev/canvask3d/k3d_test.html',
            'http://www.megidish.net/awjs/',
            'http://themaninblue.com/experiment/AnimationBenchmark/canvas/',
            'http://mix10k.visitmix.com/Entry/Details/169',
            'http://www.craftymind.com/factory/guimark2/HTML5ChartingTest.html',
            'http://www.chiptune.com/starfield/starfield.html',
            'http://jarrodoverson.com/static/demos/particleSystem/',
            'http://www.effectgames.com/demos/canvascycle/',
            'http://www.thewildernessdowntown.com/',
            'http://spielzeugz.de/html5/liquid-particles.html',
            'http://hakim.se/experiments/html5/magnetic/02/',
            'http://ie.microsoft.com/testdrive/Performance/LetItSnow/',
            # crbug.com/501406 causes OOM failures on perf bots
            # 'http://ie.microsoft.com/testdrive/Graphics/WorkerFountains/Default.html',
            'http://ie.microsoft.com/testdrive/Graphics/TweetMap/Default.html',
            'http://ie.microsoft.com/testdrive/Graphics/VideoCity/Default.html',
            'http://ie.microsoft.com/testdrive/Performance/AsteroidBelt/Default.html',
            'http://www.smashcat.org/av/canvas_test/',
            # pylint: disable=C0301
            'file://tough_canvas_cases/canvas2d_balls_common/bouncing_balls.html?ball=canvas_sprite&back=canvas',
            # pylint: disable=C0301
            'file://tough_canvas_cases/canvas2d_balls_common/bouncing_balls.html?ball=image_with_shadow&back=image',
            # pylint: disable=C0301
            'file://tough_canvas_cases/canvas2d_balls_common/bouncing_balls.html?ball=filled_path&back=gradient',
            # pylint: disable=C0301
            'file://tough_canvas_cases/canvas2d_balls_common/bouncing_balls.html?ball=text&back=white&ball_count=15',
            'file://tough_canvas_cases/canvas-font-cycler.html',
            'file://tough_canvas_cases/canvas-animation-no-clear.html',
            'file://../../../chrome/test/data/perf/canvas_bench/single_image.html',
            'file://../../../chrome/test/data/perf/canvas_bench/many_images.html'
        ]

        for url in urls_list:
            self.AddStory(ToughCanvasCasesPage(url, self))
| bsd-3-clause |
indashnet/InDashNet.Open.UN2000 | android/external/chromium_org/components/test/data/autofill/merge/tools/reserialize_profiles_from_query.py | 162 | 1177 | #!/usr/bin/env python
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import sys
from autofill_merge_common import SerializeProfiles, ColumnNameToFieldType
def main():
"""Serializes the output of the query 'SELECT * from autofill_profiles;'.
"""
COLUMNS = ['GUID', 'LABEL', 'FIRST_NAME', 'MIDDLE_NAME', 'LAST_NAME', 'EMAIL',
'COMPANY_NAME', 'ADDRESS_LINE_1', 'ADDRESS_LINE_2', 'CITY',
'STATE', 'ZIPCODE', 'COUNTRY', 'PHONE', 'DATE_MODIFIED']
if len(sys.argv) != 2:
print ("Usage: python reserialize_profiles_from_query.py "
"<path/to/serialized_profiles>")
return
types = [ColumnNameToFieldType(column_name) for column_name in COLUMNS]
profiles = []
with open(sys.argv[1], 'r') as serialized_profiles:
for line in serialized_profiles:
# trim the newline if present
if line[-1] == '\n':
line = line[:-1]
values = line.split("|")
profiles.append(zip(types, values))
print SerializeProfiles(profiles)
return 0
if __name__ == '__main__':
sys.exit(main())
| apache-2.0 |
HopeFOAM/HopeFOAM | ThirdParty-0.1/ParaView-5.0.1/Examples/Catalyst/PythonDolfinExample/simulation-catalyst-step6.py | 1 | 8271 | """This demo program solves the incompressible Navier-Stokes equations
on an L-shaped domain using Chorin's splitting method."""
# Copyright (C) 2010-2011 Anders Logg
#
# This file is part of DOLFIN.
#
# DOLFIN is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# DOLFIN is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with DOLFIN. If not, see <http://www.gnu.org/licenses/>.
#
# Modified by Mikael Mortensen 2011
#
# First added: 2010-08-30
# Last changed: 2011-06-30
#
# SC14 Paraview's Catalyst tutorial
#
# Step 6 : Add field data arrays to VTK grid
#
# [SC14-Catalyst] we need a python environment that enables import of both Dolfin and ParaView
execfile("simulation-env.py")
# [SC14-Catalyst] import paraview, vtk and paraview's simple API
import sys
import paraview
import paraview.vtk as vtk
import paraview.simple as pvsimple
# [SC14-Catalyst] check for command line arguments
if len(sys.argv) != 3:
print "command is 'python",sys.argv[0],"<script name> <number of time steps>'"
sys.exit(1)
# [SC14-Catalyst] initialize and read input parameters
paraview.options.batch = True
paraview.options.symmetric = True
# [SC14-Catalyst] import user co-processing script
import vtkPVCatalystPython
import os
scriptpath, scriptname = os.path.split(sys.argv[1])
sys.path.append(scriptpath)
if scriptname.endswith(".py"):
print 'script name is ', scriptname
scriptname = scriptname[0:len(scriptname)-3]
try:
cpscript = __import__(scriptname)
except:
print sys.exc_info()
print 'Cannot find ', scriptname, ' -- no coprocessing will be performed.'
sys.exit(1)
# [SC14-Catalyst] Co-Processing routine to be called at the end of each simulation time step
def coProcess(grid, time, step):
    """Hand one simulation state to Catalyst.

    Asks the user pipeline whether output is wanted for this time/step and,
    if so, attaches the VTK grid as the "input" channel and runs the
    co-processing script.
    """
    # initialize data description
    datadescription = vtkPVCatalystPython.vtkCPDataDescription()
    datadescription.SetTimeData(time, step)
    datadescription.AddInput("input")
    cpscript.RequestDataDescription(datadescription)
    inputdescription = datadescription.GetInputDescriptionByName("input")
    # Nothing to do this step if the pipeline does not need the grid.
    # (PEP 8 fixes: was "== False" and "!= None".)
    if not inputdescription.GetIfGridIsNecessary():
        return
    if grid is not None:
        # attach VTK data set to pipeline input
        inputdescription.SetGrid(grid)
    # execute catalyst processing
    cpscript.DoCoProcessing(datadescription)
# [SC14-Catalyst] convert dolfin mesh to a VTK unstructured grid
def Mesh2VTKUGrid(mesh):
    """Convert a DOLFIN Mesh into a vtkUnstructuredGrid (points + cells).

    The VTK cell type is looked up as vtkcelltypes[topological_dim][n_points],
    e.g. dim 2 with 3 points -> VTK_TRIANGLE, dim 3 with 4 points -> VTK_TETRA.
    """
    vtkcelltypes=((),(vtk.VTK_EMPTY_CELL,vtk.VTK_VERTEX,vtk.VTK_LINE),(vtk.VTK_EMPTY_CELL,vtk.VTK_VERTEX,vtk.VTK_LINE,vtk.VTK_TRIANGLE,vtk.VTK_QUAD,vtk.VTK_POLYGON,vtk.VTK_POLYGON),(vtk.VTK_EMPTY_CELL,vtk.VTK_VERTEX,vtk.VTK_LINE,vtk.VTK_TRIANGLE,vtk.VTK_TETRA,vtk.VTK_CONVEX_POINT_SET,vtk.VTK_CONVEX_POINT_SET,vtk.VTK_CONVEX_POINT_SET,vtk.VTK_HEXAHEDRON))
    # Copy vertex coordinates.
    npoints=mesh.num_vertices()
    geom=mesh.geometry()
    pts=vtk.vtkPoints()
    pts.SetNumberOfPoints(npoints)
    for i in xrange(npoints):
        p=geom.point(i)
        pts.SetPoint(i,p.x(),p.y(),p.z())
    # Copy cell connectivity, recording each cell's type and its offset into
    # the connectivity array.
    dim = mesh.topology().dim()
    ncells=mesh.num_cells()
    cells=vtk.vtkCellArray()
    cellTypes=vtk.vtkUnsignedCharArray()
    cellTypes.SetNumberOfTuples(ncells)
    cellLocations=vtk.vtkIdTypeArray()
    cellLocations.SetNumberOfTuples(ncells)
    loc=0  # running offset into the connectivity array
    for (cell,i) in zip(mesh.cells(),xrange(ncells)) :
        ncellpoints=len(cell)
        cells.InsertNextCell(ncellpoints)
        for cpoint in cell:
            cells.InsertCellPoint(cpoint)
        cellTypes.SetTuple1(i,vtkcelltypes[dim][ncellpoints])
        cellLocations.SetTuple1(i,loc)
        # each cell contributes one count entry plus its point ids
        loc+=1+ncellpoints
    ugrid = vtk.vtkUnstructuredGrid()
    ugrid.SetPoints(pts)
    ugrid.SetCells(cellTypes,cellLocations,cells)
    return ugrid
# [SC14-Catalyst] convert a flattened sequence of values to VTK double array
def Values2VTKArray(values, n, name):
    """Wrap a flat, component-major value sequence as a named vtkDoubleArray.

    Component j of tuple i is read from values[i + j*n], i.e. the input is
    laid out as all first components, then all second components, and so on
    (the layout produced by DOLFIN's compute_vertex_values).
    """
    ncomps = len(values) // n
    vtk_array = vtk.vtkDoubleArray()
    vtk_array.SetNumberOfComponents(ncomps)
    vtk_array.SetNumberOfTuples(n)
    for i in range(n):
        # De-interleave the component-major storage into one tuple.
        vtk_array.SetTupleValue(i, [values[i + j * n] for j in range(ncomps)])
    vtk_array.SetName(name)
    return vtk_array
def AddFieldData(ugrid, pointArrays, cellArrays):
    """Attach named point- and cell-centred data arrays to a VTK grid.

    Each entry of pointArrays / cellArrays is a (name, flat_values) pair;
    the flat values are wrapped via Values2VTKArray.
    """
    point_data = ugrid.GetPointData()
    n_points = ugrid.GetNumberOfPoints()
    for field_name, flat_values in pointArrays:
        point_data.AddArray(Values2VTKArray(flat_values, n_points, field_name))

    cell_data = ugrid.GetCellData()
    n_cells = ugrid.GetNumberOfCells()
    for field_name, flat_values in cellArrays:
        cell_data.AddArray(Values2VTKArray(flat_values, n_cells, field_name))
# Begin demo

from dolfin import *

# Print log messages only from the root process in parallel
parameters["std_out_all_processes"] = False;

# Load mesh from file
mesh = Mesh(DOLFIN_EXAMPLE_DATA_DIR+"/lshape.xml.gz")

# Define function spaces (P2-P1)
V = VectorFunctionSpace(mesh, "Lagrange", 2)
Q = FunctionSpace(mesh, "Lagrange", 1)

# Define trial and test functions
u = TrialFunction(V)
p = TrialFunction(Q)
v = TestFunction(V)
q = TestFunction(Q)

# Set parameter values
dt = 0.01
T = 3
nu = 0.01

# Define time-dependent pressure boundary condition
p_in = Expression("sin(3.0*t)", t=0.0)

# Define boundary conditions
noslip = DirichletBC(V, (0, 0),
                     "on_boundary && \
                      (x[0] < DOLFIN_EPS | x[1] < DOLFIN_EPS | \
                      (x[0] > 0.5 - DOLFIN_EPS && x[1] > 0.5 - DOLFIN_EPS))")
inflow = DirichletBC(Q, p_in, "x[1] > 1.0 - DOLFIN_EPS")
outflow = DirichletBC(Q, 0, "x[0] > 1.0 - DOLFIN_EPS")
bcu = [noslip]
bcp = [inflow, outflow]

# Create functions
u0 = Function(V)
u1 = Function(V)
p1 = Function(Q)

# Define coefficients
k = Constant(dt)
f = Constant((0, 0))

# Tentative velocity step (Chorin's splitting, step 1)
F1 = (1/k)*inner(u - u0, v)*dx + inner(grad(u0)*u0, v)*dx + \
     nu*inner(grad(u), grad(v))*dx - inner(f, v)*dx
a1 = lhs(F1)
L1 = rhs(F1)

# Pressure update
a2 = inner(grad(p), grad(q))*dx
L2 = -(1/k)*div(u1)*q*dx

# Velocity update
a3 = inner(u, v)*dx
L3 = inner(u1, v)*dx - k*inner(grad(p1), v)*dx

# Assemble matrices (time-independent, so assembled once)
A1 = assemble(a1)
A2 = assemble(a2)
A3 = assemble(a3)

# Use amg preconditioner if available
prec = "amg" if has_krylov_solver_preconditioner("amg") else "default"

# Create files for storing solution
ufile = File("results/velocity.pvd")
pfile = File("results/pressure.pvd")

# Time-stepping
maxtimestep = int(sys.argv[2])
tstep = 0
t = dt
while tstep < maxtimestep:

    # Update pressure boundary condition
    p_in.t = t

    # Compute tentative velocity step
    begin("Computing tentative velocity")
    b1 = assemble(L1)
    [bc.apply(A1, b1) for bc in bcu]
    solve(A1, u1.vector(), b1, "gmres", "default")
    end()

    # Pressure correction
    begin("Computing pressure correction")
    b2 = assemble(L2)
    [bc.apply(A2, b2) for bc in bcp]
    solve(A2, p1.vector(), b2, "gmres", prec)
    end()

    # Velocity correction
    begin("Computing velocity correction")
    b3 = assemble(L3)
    [bc.apply(A3, b3) for bc in bcu]
    solve(A3, u1.vector(), b3, "gmres", "default")
    end()

    # Plot solution [SC14-Catalyst] Not anymore
    # plot(p1, title="Pressure", rescale=True)
    # plot(u1, title="Velocity", rescale=True)
    # Save to file [SC14-Catalyst] Not anymore
    # ufile << u1
    # pfile << p1

    # [SC14-Catalyst] convert solution to VTK grid
    ugrid = Mesh2VTKUGrid( u1.function_space().mesh() )
    # [SC14-Catalyst] add field data to the VTK grid
    velocity = u1.compute_vertex_values()
    pressure = p1.compute_vertex_values()
    AddFieldData( ugrid, [ ("Velocity",velocity) , ("Pressure",pressure) ] , [] )
    # [SC14-Catalyst] trigger catalyst execution
    coProcess(ugrid,t,tstep)

    # Move to next time step
    u0.assign(u1)
    t += dt
    tstep += 1
    print "t =", t, "step =",tstep

# Hold plot [SC14-Catalyst] Not anymore
# interactive()
| gpl-3.0 |
Batterfii/django | django/contrib/admin/templatetags/admin_urls.py | 553 | 1812 | from django import template
from django.contrib.admin.utils import quote
from django.core.urlresolvers import Resolver404, get_script_prefix, resolve
from django.utils.http import urlencode
from django.utils.six.moves.urllib.parse import parse_qsl, urlparse, urlunparse
register = template.Library()
@register.filter
def admin_urlname(value, arg):
    """Build the "admin:<app>_<model>_<view>" URL name for *value*'s opts."""
    return 'admin:{0}_{1}_{2}'.format(value.app_label, value.model_name, arg)
@register.filter
def admin_urlquote(value):
    # Template-filter wrapper around the admin's quote() for use in URLs.
    return quote(value)
@register.simple_tag(takes_context=True)
def add_preserved_filters(context, url, popup=False, to_field=None):
    """Re-attach preserved changelist filters (and popup / to-field markers)
    to *url*'s query string, keeping any parameters already present on it."""
    opts = context.get('opts')
    preserved_filters = context.get('preserved_filters')

    url_parts = list(urlparse(url))
    existing_query = dict(parse_qsl(url_parts[4]))
    merged = {}

    if opts and preserved_filters:
        preserved_filters = dict(parse_qsl(preserved_filters))

        match_url = '/%s' % url.partition(get_script_prefix())[2]
        try:
            match = resolve(match_url)
        except Resolver404:
            pass
        else:
            current_url = '%s:%s' % (match.app_name, match.url_name)
            changelist_url = 'admin:%s_%s_changelist' % (opts.app_label, opts.model_name)
            # On the changelist itself, unwrap the nested filter parameters.
            if changelist_url == current_url and '_changelist_filters' in preserved_filters:
                preserved_filters = dict(parse_qsl(preserved_filters['_changelist_filters']))

        merged.update(preserved_filters)

    # Imports are local to avoid a circular import with admin.options.
    if popup:
        from django.contrib.admin.options import IS_POPUP_VAR
        merged[IS_POPUP_VAR] = 1
    if to_field:
        from django.contrib.admin.options import TO_FIELD_VAR
        merged[TO_FIELD_VAR] = to_field

    # Parameters already on the URL win over the preserved ones.
    merged.update(existing_query)
    url_parts[4] = urlencode(merged)
    return urlunparse(url_parts)
| bsd-3-clause |
delhivery/django | tests/admin_custom_urls/models.py | 288 | 2513 | from functools import update_wrapper
from django.contrib import admin
from django.core.urlresolvers import reverse
from django.db import models
from django.http import HttpResponseRedirect
from django.utils.encoding import python_2_unicode_compatible
@python_2_unicode_compatible
class Action(models.Model):
    # Test model with a CharField primary key (exercised by ActionAdmin's
    # custom "!add/" URL below).
    name = models.CharField(max_length=50, primary_key=True)
    description = models.CharField(max_length=70)

    def __str__(self):
        return self.name
class ActionAdmin(admin.ModelAdmin):
    """
    A ModelAdmin for the Action model that changes the URL of the add_view
    to '<app name>/<model name>/!add/'
    The Action model has a CharField PK.
    """
    list_display = ('name', 'description')

    def remove_url(self, name):
        """
        Remove all entries named 'name' from the ModelAdmin instance URL
        patterns list
        """
        return [url for url in super(ActionAdmin, self).get_urls() if url.name != name]

    def get_urls(self):
        # Add the URL of our custom 'add_view' view to the front of the URLs
        # list. Remove the existing one(s) first
        from django.conf.urls import url

        def wrap(view):
            # Mirror ModelAdmin.get_urls(): route through admin_view() so the
            # usual admin permission checks still apply.
            def wrapper(*args, **kwargs):
                return self.admin_site.admin_view(view)(*args, **kwargs)
            return update_wrapper(wrapper, view)

        info = self.model._meta.app_label, self.model._meta.model_name

        view_name = '%s_%s_add' % info

        return [
            url(r'^!add/$', wrap(self.add_view), name=view_name),
        ] + self.remove_url(view_name)
class Person(models.Model):
    # Minimal model used to exercise PersonAdmin's post-save redirects.
    name = models.CharField(max_length=20)
class PersonAdmin(admin.ModelAdmin):
    # Redirect to the history/delete views after add/change instead of the
    # default changelist, to exercise custom post-save redirects.
    def response_post_save_add(self, request, obj):
        return HttpResponseRedirect(
            reverse('admin:admin_custom_urls_person_history', args=[obj.pk]))

    def response_post_save_change(self, request, obj):
        return HttpResponseRedirect(
            reverse('admin:admin_custom_urls_person_delete', args=[obj.pk]))
class Car(models.Model):
    # Minimal model used to exercise CarAdmin's custom response_add.
    name = models.CharField(max_length=20)
class CarAdmin(admin.ModelAdmin):
    def response_add(self, request, obj, post_url_continue=None):
        # Always continue to the object's history view after adding a Car.
        return super(CarAdmin, self).response_add(
            request, obj, post_url_continue=reverse('admin:admin_custom_urls_car_history', args=[obj.pk]))
# Dedicated admin site for these tests; register all models on it.
site = admin.AdminSite(name='admin_custom_urls')
site.register(Action, ActionAdmin)
site.register(Person, PersonAdmin)
site.register(Car, CarAdmin)
| bsd-3-clause |
diofeher/django-nfa | django/contrib/auth/__init__.py | 14 | 2883 | import datetime
from django.core.exceptions import ImproperlyConfigured
# Session keys under which the authenticated user's id and the dotted path
# of the backend that authenticated it are stored.
SESSION_KEY = '_auth_user_id'
BACKEND_SESSION_KEY = '_auth_user_backend'
# Query-string parameter used for post-login redirects.
REDIRECT_FIELD_NAME = 'next'
def load_backend(path):
    """Import and instantiate the authentication backend at dotted *path*.

    Raises ImproperlyConfigured when the module cannot be imported, the path
    is malformed, or the class is missing from the module.
    """
    i = path.rfind('.')
    module, attr = path[:i], path[i+1:]
    try:
        mod = __import__(module, {}, {}, [attr])
    # Modernized from the Python-2-only "except E, e" / "raise E, msg"
    # forms to syntax valid in both Python 2.6+ and Python 3.
    except ImportError as e:
        raise ImproperlyConfigured('Error importing authentication backend %s: "%s"' % (module, e))
    except ValueError:
        raise ImproperlyConfigured('Error importing authentication backends. Is AUTHENTICATION_BACKENDS a correctly defined list or tuple?')
    try:
        cls = getattr(mod, attr)
    except AttributeError:
        raise ImproperlyConfigured('Module "%s" does not define a "%s" authentication backend' % (module, attr))
    # Backends are instantiated with no arguments.
    return cls()
def get_backends():
    """Instantiate every backend listed in settings.AUTHENTICATION_BACKENDS,
    preserving the configured order."""
    from django.conf import settings
    return [load_backend(backend_path)
            for backend_path in settings.AUTHENTICATION_BACKENDS]
def authenticate(**credentials):
    """Try each configured backend with the given credentials.

    Returns the first User that authenticates, annotated with the dotted
    path of the backend that accepted it; returns None (implicitly) when no
    backend matches.
    """
    for backend in get_backends():
        try:
            candidate = backend.authenticate(**credentials)
        except TypeError:
            # Signature mismatch: this backend does not accept these
            # credential keywords, so try the next one.  (Note this also
            # hides TypeErrors raised *inside* the backend.)
            continue
        if candidate is not None:
            # Annotate the user object with the path of the backend.
            candidate.backend = "%s.%s" % (backend.__module__,
                                           backend.__class__.__name__)
            return candidate
def login(request, user):
    """
    Persist a user id and a backend in the request. This way a user doesn't
    have to reauthenticate on every request.
    """
    if user is None:
        user = request.user
    # TODO: It would be nice to support different login methods, like signed cookies.
    # Record the login time before storing the id in the session.
    user.last_login = datetime.datetime.now()
    user.save()
    request.session[SESSION_KEY] = user.id
    # user.backend is set by authenticate(); get_user() needs it later.
    request.session[BACKEND_SESSION_KEY] = user.backend
    if hasattr(request, 'user'):
        request.user = user
def logout(request):
    """
    Remove the authenticated user's ID from the request.
    """
    # Each key is cleared independently; a missing key is not an error.
    try:
        del request.session[SESSION_KEY]
    except KeyError:
        pass
    try:
        del request.session[BACKEND_SESSION_KEY]
    except KeyError:
        pass
    if hasattr(request, 'user'):
        from django.contrib.auth.models import AnonymousUser
        request.user = AnonymousUser()
def get_user(request):
    """Return the User for the current session, or AnonymousUser when the
    session carries no (or incomplete) authentication data."""
    from django.contrib.auth.models import AnonymousUser
    try:
        user_id = request.session[SESSION_KEY]
        backend = load_backend(request.session[BACKEND_SESSION_KEY])
        # The backend may return None for a stale id; fall back to anonymous.
        user = backend.get_user(user_id) or AnonymousUser()
    except KeyError:
        user = AnonymousUser()
    return user
| bsd-3-clause |
progdupeupl/pdp_website | pdp/forum/migrations/0001_initial.py | 1 | 5896 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
import datetime
class Migration(migrations.Migration):
    """Initial schema for the forum app: categories, forums, topics, posts,
    and the per-user followed/read tracking tables."""

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='Category',
            fields=[
                ('id', models.AutoField(primary_key=True, serialize=False, auto_created=True, verbose_name='ID')),
                ('title', models.CharField(max_length=80, verbose_name='Titre')),
                ('position', models.IntegerField(verbose_name='Position', null=True, blank=True)),
                ('slug', models.SlugField(max_length=80)),
            ],
            options={
                'verbose_name_plural': 'Catégories',
                'verbose_name': 'Catégorie',
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='Forum',
            fields=[
                ('id', models.AutoField(primary_key=True, serialize=False, auto_created=True, verbose_name='ID')),
                ('title', models.CharField(max_length=80, verbose_name='Titre')),
                ('subtitle', models.CharField(max_length=200, verbose_name='Sous-titre', blank=True)),
                ('position_in_category', models.IntegerField(verbose_name='Position dans la catégorie', null=True, blank=True)),
                ('slug', models.SlugField(max_length=80)),
                ('category', models.ForeignKey(to='forum.Category', verbose_name='Catégorie')),
            ],
            options={
                'verbose_name_plural': 'Forums',
                'verbose_name': 'Forum',
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='Post',
            fields=[
                ('id', models.AutoField(primary_key=True, serialize=False, auto_created=True, verbose_name='ID')),
                ('text', models.TextField(verbose_name='Texte')),
                ('pubdate', models.DateTimeField(auto_now_add=True, verbose_name='Date de publication')),
                ('update', models.DateTimeField(verbose_name="Date d'édition", null=True, blank=True)),
                ('position_in_topic', models.IntegerField(verbose_name='Position dans le sujet')),
                ('is_useful', models.BooleanField(default=False, verbose_name='Est utile')),
                ('is_moderated', models.BooleanField(default=False, verbose_name='Est modéré')),
                # NOTE(review): frozen timestamp captured when the migration
                # was generated; intentional for a migration default.
                ('moderation_time', models.DateTimeField(default=datetime.datetime(2014, 11, 26, 20, 15, 36, 701382), verbose_name="Date d'édition")),
                ('moderation_text', models.TextField(default='', verbose_name='Explication de modération', blank=True)),
                ('author', models.ForeignKey(to=settings.AUTH_USER_MODEL, verbose_name='Auteur', related_name='posts')),
                ('moderated_by', models.ForeignKey(to=settings.AUTH_USER_MODEL, verbose_name='Modérateur', null=True)),
            ],
            options={
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='Topic',
            fields=[
                ('id', models.AutoField(primary_key=True, serialize=False, auto_created=True, verbose_name='ID')),
                ('title', models.CharField(max_length=80, verbose_name='Titre')),
                ('subtitle', models.CharField(max_length=200, verbose_name='Sous-titre', blank=True)),
                ('pubdate', models.DateTimeField(auto_now_add=True, verbose_name='Date de création')),
                ('is_solved', models.BooleanField(default=False, verbose_name='Est résolu')),
                ('is_locked', models.BooleanField(default=False, verbose_name='Est verrouillé')),
                ('is_sticky', models.BooleanField(default=False, verbose_name='Est en post-it')),
                ('author', models.ForeignKey(to=settings.AUTH_USER_MODEL, verbose_name='Auteur', related_name='topics')),
                ('forum', models.ForeignKey(to='forum.Forum', verbose_name='Forum')),
                ('last_message', models.ForeignKey(to='forum.Post', verbose_name='Dernier message', related_name='last_message', null=True)),
            ],
            options={
                'verbose_name_plural': 'Sujets',
                'verbose_name': 'Sujet',
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='TopicFollowed',
            fields=[
                ('id', models.AutoField(primary_key=True, serialize=False, auto_created=True, verbose_name='ID')),
                ('topic', models.ForeignKey(to='forum.Topic')),
                ('user', models.ForeignKey(to=settings.AUTH_USER_MODEL, related_name='topics_followed')),
            ],
            options={
                'verbose_name_plural': 'Sujets suivis',
                'verbose_name': 'Sujet suivi',
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='TopicRead',
            fields=[
                ('id', models.AutoField(primary_key=True, serialize=False, auto_created=True, verbose_name='ID')),
                ('post', models.ForeignKey(to='forum.Post')),
                ('topic', models.ForeignKey(to='forum.Topic')),
                ('user', models.ForeignKey(to=settings.AUTH_USER_MODEL, related_name='topics_read')),
            ],
            options={
                'verbose_name_plural': 'Sujets lus',
                'verbose_name': 'Sujet lu',
            },
            bases=(models.Model,),
        ),
        # Added separately because Post and Topic reference each other.
        migrations.AddField(
            model_name='post',
            name='topic',
            field=models.ForeignKey(to='forum.Topic', verbose_name='Sujet'),
            preserve_default=True,
        ),
    ]
| agpl-3.0 |
emidln/django_roa | env/lib/python2.7/site-packages/django/contrib/gis/db/models/manager.py | 505 | 3578 | from django.db.models.manager import Manager
from django.contrib.gis.db.models.query import GeoQuerySet
class GeoManager(Manager):
    "Overrides Manager to return Geographic QuerySets."

    # This manager should be used for queries on related fields
    # so that geometry columns on Oracle and MySQL are selected
    # properly.
    use_for_related_fields = True

    def get_query_set(self):
        return GeoQuerySet(self.model, using=self._db)

    # Every method below is a thin proxy to the identically-named GeoQuerySet
    # method, so the geographic API is reachable directly from the manager.
    def area(self, *args, **kwargs):
        return self.get_query_set().area(*args, **kwargs)

    def centroid(self, *args, **kwargs):
        return self.get_query_set().centroid(*args, **kwargs)

    def collect(self, *args, **kwargs):
        return self.get_query_set().collect(*args, **kwargs)

    def difference(self, *args, **kwargs):
        return self.get_query_set().difference(*args, **kwargs)

    def distance(self, *args, **kwargs):
        return self.get_query_set().distance(*args, **kwargs)

    def envelope(self, *args, **kwargs):
        return self.get_query_set().envelope(*args, **kwargs)

    def extent(self, *args, **kwargs):
        return self.get_query_set().extent(*args, **kwargs)

    def extent3d(self, *args, **kwargs):
        return self.get_query_set().extent3d(*args, **kwargs)

    def force_rhr(self, *args, **kwargs):
        return self.get_query_set().force_rhr(*args, **kwargs)

    def geohash(self, *args, **kwargs):
        return self.get_query_set().geohash(*args, **kwargs)

    def geojson(self, *args, **kwargs):
        return self.get_query_set().geojson(*args, **kwargs)

    def gml(self, *args, **kwargs):
        return self.get_query_set().gml(*args, **kwargs)

    def intersection(self, *args, **kwargs):
        return self.get_query_set().intersection(*args, **kwargs)

    def kml(self, *args, **kwargs):
        return self.get_query_set().kml(*args, **kwargs)

    def length(self, *args, **kwargs):
        return self.get_query_set().length(*args, **kwargs)

    def make_line(self, *args, **kwargs):
        return self.get_query_set().make_line(*args, **kwargs)

    def mem_size(self, *args, **kwargs):
        return self.get_query_set().mem_size(*args, **kwargs)

    def num_geom(self, *args, **kwargs):
        return self.get_query_set().num_geom(*args, **kwargs)

    def num_points(self, *args, **kwargs):
        return self.get_query_set().num_points(*args, **kwargs)

    def perimeter(self, *args, **kwargs):
        return self.get_query_set().perimeter(*args, **kwargs)

    def point_on_surface(self, *args, **kwargs):
        return self.get_query_set().point_on_surface(*args, **kwargs)

    def reverse_geom(self, *args, **kwargs):
        return self.get_query_set().reverse_geom(*args, **kwargs)

    def scale(self, *args, **kwargs):
        return self.get_query_set().scale(*args, **kwargs)

    def snap_to_grid(self, *args, **kwargs):
        return self.get_query_set().snap_to_grid(*args, **kwargs)

    def svg(self, *args, **kwargs):
        return self.get_query_set().svg(*args, **kwargs)

    def sym_difference(self, *args, **kwargs):
        return self.get_query_set().sym_difference(*args, **kwargs)

    def transform(self, *args, **kwargs):
        return self.get_query_set().transform(*args, **kwargs)

    def translate(self, *args, **kwargs):
        return self.get_query_set().translate(*args, **kwargs)

    def union(self, *args, **kwargs):
        return self.get_query_set().union(*args, **kwargs)

    def unionagg(self, *args, **kwargs):
        return self.get_query_set().unionagg(*args, **kwargs)
| bsd-3-clause |
trabucayre/gnuradio | gr-digital/python/digital/qa_correlate_access_code_tag.py | 4 | 3241 | #!/usr/bin/env python
#
# Copyright 2006,2007,2010,2011,2013,2017 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# SPDX-License-Identifier: GPL-3.0-or-later
#
#
from gnuradio import gr, gr_unittest, digital, blocks
default_access_code = '\xAC\xDD\xA4\xE2\xF2\x8C\x20\xFC'
def string_to_1_0_list(s):
    """Expand each character of *s* into its 8 bits, least-significant first."""
    bits = []
    for ch in s:
        byte = ord(ch)
        bits.extend((byte >> i) & 0x1 for i in range(8))
    return bits
def to_1_0_string(L):
    """Render a list of 0/1 integers as a string of '0'/'1' characters."""
    return ''.join(chr(ord('0') + bit) for bit in L)
class test_correlate_access_code(gr_unittest.TestCase):
    """QA for correlate_access_code_tag_{bb,ff}: the block must place a
    "sync" stream tag where the access code completes in the input."""

    def setUp(self):
        self.tb = gr.top_block()

    def tearDown(self):
        self.tb = None

    def test_001(self):
        # Unpacked-bit (bb) variant: "1011" occurs twice in the prefix, so
        # two tags are expected, at offsets 4 and 9.
        pad = (0,) * 64
        #           0  0  0  1  0  0  0  1
        src_data = (1, 0, 1, 1, 1, 1, 0, 1, 1) + pad + (0,) * 7
        src = blocks.vector_source_b(src_data)
        op = digital.correlate_access_code_tag_bb("1011", 0, "sync")
        dst = blocks.tag_debug(gr.sizeof_char, "sync")
        self.tb.connect(src, op, dst)
        self.tb.run()

        result_data = dst.current_tags()
        self.assertEqual(len(result_data), 2)
        self.assertEqual(result_data[0].offset, 4)
        self.assertEqual(result_data[1].offset, 9)

    def test_002(self):
        # Full 64-bit default access code followed by payload bits: exactly
        # one tag, right after the code.
        code = tuple(string_to_1_0_list(default_access_code))
        access_code = to_1_0_string(code)
        pad = (0,) * 64
        #print code
        #print access_code
        src_data = code + (1, 0, 1, 1) + pad
        src = blocks.vector_source_b(src_data)
        op = digital.correlate_access_code_tag_bb(access_code, 0, "sync")
        dst = blocks.tag_debug(gr.sizeof_char, "sync")
        self.tb.connect(src, op, dst)
        self.tb.run()

        result_data = dst.current_tags()
        self.assertEqual(len(result_data), 1)
        self.assertEqual(result_data[0].offset, len(code))

    def test_003(self):
        # Same as test_001 but for the float (ff) variant; bits are mapped
        # to antipodal +/-1.0 samples.
        pad = (0,) * 64
        #           0  0  0  1  0  0  0  1
        src_bits = (1, 0, 1, 1, 1, 1, 0, 1, 1) + pad + (0,) * 7
        src_data = [2.0*x - 1.0 for x in src_bits]
        src = blocks.vector_source_f(src_data)
        op = digital.correlate_access_code_tag_ff("1011", 0, "sync")
        dst = blocks.tag_debug(gr.sizeof_float, "sync")
        self.tb.connect(src, op, dst)
        self.tb.run()

        result_data = dst.current_tags()
        self.assertEqual(len(result_data), 2)
        self.assertEqual(result_data[0].offset, 4)
        self.assertEqual(result_data[1].offset, 9)

    def test_004(self):
        # Same as test_002 but for the float (ff) variant.
        code = tuple(string_to_1_0_list(default_access_code))
        access_code = to_1_0_string(code)
        pad = (0,) * 64
        #print code
        #print access_code
        src_bits = code + (1, 0, 1, 1) + pad
        src_data = [2.0*x - 1.0 for x in src_bits]
        src = blocks.vector_source_f(src_data)
        op = digital.correlate_access_code_tag_ff(access_code, 0, "sync")
        dst = blocks.tag_debug(gr.sizeof_float, "sync")
        self.tb.connect(src, op, dst)
        self.tb.run()

        result_data = dst.current_tags()
        self.assertEqual(len(result_data), 1)
        self.assertEqual(result_data[0].offset, len(code))


if __name__ == '__main__':
    gr_unittest.run(test_correlate_access_code, "test_correlate_access_code_tag.xml")
| gpl-3.0 |
racmariano/skidom | backend/resorts/models/conditions.py | 1 | 2431 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from .resort import Resort
from django.db import models
from django.contrib.postgres.fields import ArrayField
from dynamic_scraper.models import Scraper, SchedulerRuntime
from scrapy_djangoitem import DjangoItem
import datetime
# Past and forecasted conditions for a resort
# Past and forecasted conditions for a resort
class Conditions(models.Model):
    """Daily snow/weather conditions (observed or forecasted) for a Resort."""

    # Hard-coded attributes needed for scraping
    resort = models.ForeignKey(Resort, null = True, default=6)
    conditions_page_url = models.URLField(blank = True)
    checker_runtime = models.ForeignKey(SchedulerRuntime, blank = True, null = True, on_delete = models.SET_NULL)

    # Attributes collected during scraping
    date = models.DateField(default = datetime.date.today)
    base_temp = models.DecimalField(max_digits = 6, decimal_places = 2, default = 0)
    summit_temp = models.DecimalField(max_digits = 6, decimal_places = 2, default = 0)
    wind_speed = models.DecimalField(max_digits = 6, decimal_places = 2, default = 0)
    base_depth = models.DecimalField(max_digits = 6, decimal_places = 2, default = 0)
    num_trails_open = models.IntegerField(default = 0)
    new_snow_24_hr = models.IntegerField(default = 0)
    #past_n_day_snowfall = ArrayField(models.DecimalField(max_digits = 6, decimal_places = 2, default = 0), size = 15)
    #past_n_day_wind_speed = ArrayField(models.DecimalField(max_digits = 6, decimal_places = 2, default = 0), size = 15)
    #future_n_day_snowfall = ArrayField(models.DecimalField(max_digits = 6, decimal_places = 2, default = 0), size = 15)
    #future_n_day_wind_speed = ArrayField(models.DecimalField(max_digits = 6, decimal_places = 2, default = 0), size = 15)

    # For database querying
    unique_id = models.CharField(default='', max_length = 200)

    def __init__(self, *args, **kwargs):
        """On first construction (no pk yet), derive the scrape URL and the
        per-resort-per-day unique id from the related resort."""
        super(Conditions, self).__init__(*args, **kwargs)
        if not self.id:
            # BUG FIX: removed the dead statement "day = datetime.date.today"
            # (the result was never used, and it was missing its call
            # parentheses anyway).
            self.conditions_page_url = self.resort.conditions_page_url
            self.unique_id = self.resort.name + str(datetime.date.today())

    def __unicode__(self):
        return self.resort.name + ": " + str(self.date)

    def __str__(self):
        return self.resort.name + ": " + str(self.date)

    class Meta:
        verbose_name_plural = "Conditions"
# Scrapy item wrapper so django-dynamic-scraper can write Conditions rows.
class ConditionsItem(DjangoItem):
    django_model = Conditions
| mit |
dardevelin/rhythmbox-gnome-fork | plugins/magnatune/MagnatuneSource.py | 3 | 17929 | # -*- Mode: python; coding: utf-8; tab-width: 8; indent-tabs-mode: t; -*-
#
# Copyright (C) 2006 Adam Zimmerman <adam_zimmerman@sfu.ca>
# Copyright (C) 2006 James Livingston <doclivingston@gmail.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# The Rhythmbox authors hereby grant permission for non-GPL compatible
# GStreamer plugins to be used and distributed together with GStreamer
# and Rhythmbox. This permission is above and beyond the permissions granted
# by the GPL license by which Rhythmbox is covered. If you modify this code
# you may extend this exception to your version of the code, but you are not
# obligated to do so. If you do not wish to do so, delete this exception
# statement from your version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
import os
import sys
import xml
import urllib
import urlparse
import threading
import zipfile
import rb
from gi.repository import RB
from gi.repository import GObject, Gtk, Gdk, Gio
from TrackListHandler import TrackListHandler
from DownloadAlbumHandler import DownloadAlbumHandler, MagnatuneDownloadError
import MagnatuneAccount
import gettext
gettext.install('rhythmbox', RB.locale_dir())
# Partner id appended to purchase URLs so Magnatune can credit Rhythmbox.
magnatune_partner_id = "rhythmbox"

# URIs
magnatune_song_info_uri = "http://magnatune.com/info/song_info_xml.zip"
magnatune_changed_uri = "http://magnatune.com/info/changed.txt"
magnatune_buy_album_uri = "https://magnatune.com/buy/choose?"
magnatune_api_download_uri = "http://%s:%s@download.magnatune.com/buy/membership_free_dl_xml?"

# Local working directories: in-progress album downloads live under the user
# data dir, the catalogue cache under the user cache dir.
magnatune_in_progress_dir = Gio.file_new_for_path(RB.user_data_dir()).resolve_relative_path('magnatune')
magnatune_cache_dir = Gio.file_new_for_path(RB.user_cache_dir()).resolve_relative_path('magnatune')

# Cached catalogue files: the parsed XML, the temporary zip being fetched,
# and the last-seen remote change id.
magnatune_song_info = os.path.join(magnatune_cache_dir.get_path(), 'song_info.xml')
magnatune_song_info_temp = os.path.join(magnatune_cache_dir.get_path(), 'song_info.zip.tmp')
magnatune_changes = os.path.join(magnatune_cache_dir.get_path(), 'changed.txt')
class MagnatuneSource(RB.BrowserSource):
def __init__(self):
RB.BrowserSource.__init__(self)
self.hate = self
self.__settings = Gio.Settings("org.gnome.rhythmbox.plugins.magnatune")
# source state
self.__activated = False
self.__db = None
self.__notify_id = 0 # GObject.idle_add id for status notifications
self.__info_screen = None # the loading screen
# track data
self.__sku_dict = {}
self.__home_dict = {}
self.__art_dict = {}
# catalogue stuff
self.__updating = True # whether we're loading the catalog right now
self.__has_loaded = False # whether the catalog has been loaded yet
self.__update_id = 0 # GObject.idle_add id for catalog updates
self.__catalogue_loader = None
self.__catalogue_check = None
self.__load_progress = (0, 0) # (complete, total)
# album download stuff
self.__downloads = {} # keeps track of download progress for each file
self.__copies = {} # keeps copy objects for each file
self.__art_store = RB.ExtDB(name="album-art")
#
# RBSource methods
#
def do_impl_show_entry_popup(self):
self.show_source_popup("/MagnatuneSourceViewPopup")
def do_get_status(self, status, progress_text, progress):
if self.__updating:
complete, total = self.__load_progress
if total > 0:
progress = min(float(complete) / total, 1.0)
else:
progress = -1.0
return (_("Loading Magnatune catalog"), None, progress)
elif len(self.__downloads) > 0:
complete, total = map(sum, zip(*self.__downloads.itervalues()))
if total > 0:
progress = min(float(complete) / total, 1.0)
else:
progress = -1.0
return (_("Downloading Magnatune Album(s)"), None, progress)
else:
qm = self.props.query_model
return (qm.compute_status_normal("%d song", "%d songs"), None, 2.0)
def do_selected(self):
if not self.__activated:
shell = self.props.shell
self.__db = shell.props.db
self.__entry_type = self.props.entry_type
if not magnatune_in_progress_dir.query_exists(None):
magnatune_in_progress_path = magnatune_in_progress_dir.get_path()
os.mkdir(magnatune_in_progress_path, 0700)
if not magnatune_cache_dir.query_exists(None):
magnatune_cache_path = magnatune_cache_dir.get_path()
os.mkdir(magnatune_cache_path, 0700)
self.__activated = True
self.__show_loading_screen(True)
# start our catalogue updates
self.__update_id = GObject.timeout_add_seconds(6 * 60 * 60, self.__update_catalogue)
self.__update_catalogue()
def do_impl_can_delete(self):
return False
def do_pack_content(self, content):
self.__paned_box = Gtk.VBox(homogeneous=False, spacing=5)
self.pack_start(self.__paned_box, True, True, 0)
self.__paned_box.pack_start(content, True, True, 0)
def do_delete_thyself(self):
if self.__update_id != 0:
GObject.source_remove(self.__update_id)
self.__update_id = 0
if self.__notify_id != 0:
GObject.source_remove(self.__notify_id)
self.__notify_id = 0
if self.__catalogue_loader is not None:
self.__catalogue_loader.cancel()
self.__catalogue_loader = None
if self.__catalogue_check is not None:
self.__catalogue_check.cancel()
self.__catalogue_check = None
RB.BrowserSource.do_delete_thyself(self)
#
# methods for use by plugin and UI
#
def display_artist_info(self):
screen = self.props.shell.props.window.get_screen()
tracks = self.get_entry_view().get_selected_entries()
urls = set([])
for tr in tracks:
sku = self.__sku_dict[tr.get_string(RB.RhythmDBPropType.LOCATION)]
url = self.__home_dict[sku]
if url not in urls:
Gtk.show_uri(screen, url, Gdk.CURRENT_TIME)
urls.add(url)
def download_redirect(self):
screen = self.props.shell.props.window.get_screen()
tracks = self.get_entry_view().get_selected_entries()
urls = set([])
for tr in tracks:
sku = self.__sku_dict[tr.get_string(RB.RhythmDBPropType.LOCATION)]
url = magnatune_buy_album_uri + urllib.urlencode({ 'sku': sku, 'ref': magnatune_partner_id })
if url not in urls:
Gtk.show_uri(screen, url, Gdk.CURRENT_TIME)
urls.add(url)
def download_album(self):
if self.__settings['account-type'] != 'download':
# The user doesn't have a download account, so redirect them to the download signup page
self.download_redirect()
return
try:
# Just use the first library location
library = Gio.Settings("org.gnome.rhythmbox.rhythmdb")
library_location = library['locations'][0]
except IndexError, e:
RB.error_dialog(title = _("Couldn't download album"),
message = _("You must have a library location set to download an album."))
return
tracks = self.get_entry_view().get_selected_entries()
skus = []
for track in tracks:
sku = self.__sku_dict[track.get_string(RB.RhythmDBPropType.LOCATION)]
if sku in skus:
continue
skus.append(sku)
self.__auth_download(sku)
#
# internal catalogue downloading and loading
#
def __update_catalogue(self):
def update_cb(remote_changes):
self.__catalogue_check = None
try:
f = open(magnatune_changes, 'r')
local_changes = f.read().strip()
except:
local_changes = ""
remote_changes = remote_changes.strip()
print "local checksum %s, remote checksum %s" % (local_changes, remote_changes)
if local_changes != remote_changes:
try:
f = open(magnatune_changes, 'w')
f.write(remote_changes + "\n")
f.close()
except Exception, e:
print "unable to write local change id: %s" % str(e)
download_catalogue()
elif self.__has_loaded is False:
load_catalogue()
def download_catalogue():
def find_song_info(catalogue):
for info in catalogue.infolist():
if info.filename.endswith("song_info.xml"):
return info.filename;
return None
def download_progress(copy, complete, total, self):
self.__load_progress = (complete, total)
self.__notify_status_changed()
def download_finished(copy, success, self):
if not success:
print "catalog download failed"
print copy.get_error()
return
print "catalog download successful"
# done downloading, unzip to real location
catalog_zip = zipfile.ZipFile(magnatune_song_info_temp)
catalog = open(magnatune_song_info, 'w')
filename = find_song_info(catalog_zip)
if filename is None:
RB.error_dialog(title=_("Unable to load catalog"),
message=_("Rhythmbox could not understand the Magnatune catalog, please file a bug."))
return
catalog.write(catalog_zip.read(filename))
catalog.close()
catalog_zip.close()
df = Gio.file_new_for_path(magnatune_song_info_temp)
df.delete(None)
self.__updating = False
self.__catalogue_loader = None
self.__notify_status_changed()
load_catalogue()
self.__updating = True
try:
df = Gio.file_new_for_path(magnatune_song_info_temp)
df.delete(None)
except:
pass
self.__catalog_loader = RB.AsyncCopy()
self.__catalog_loader.set_progress(download_progress, self)
self.__catalog_loader.start(magnatune_song_info_uri, magnatune_song_info_temp, download_finished, self)
def load_catalogue():
def catalogue_chunk_cb(loader, data, total, parser):
if data is None:
error = loader.get_error()
if error:
# report error somehow?
print "error loading catalogue: %s" % error
try:
parser.close()
except xml.sax.SAXParseException, e:
# there isn't much we can do here
print "error parsing catalogue: %s" % e
self.__show_loading_screen(False)
self.__updating = False
self.__catalogue_loader = None
# restart in-progress downloads
# (doesn't really belong here)
for f in magnatune_in_progress_dir.enumerate_children('standard::name', Gio.FileQueryInfoFlags.NONE, None):
name = f.get_name()
if not name.startswith("in_progress_"):
continue
(result, uri, etag) = magnatune_in_progress_dir.resolve_relative_path(name).load_contents(None)
print "restarting download from %s" % uri
self.__download_album(uri, name[12:])
else:
# hack around some weird chars that show up in the catalogue for some reason
data = str(data.str)
data = data.replace("\x19", "'")
data = data.replace("\x13", "-")
# argh.
data = data.replace("Rock & Roll", "Rock & Roll")
try:
parser.feed(data)
except xml.sax.SAXParseException, e:
print "error parsing catalogue: %s" % e
load_size['size'] += len(data)
self.__load_progress = (load_size['size'], total)
self.__notify_status_changed()
self.__has_loaded = True
self.__updating = True
self.__load_progress = (0, 0) # (complete, total)
self.__notify_status_changed()
load_size = {'size': 0}
parser = xml.sax.make_parser()
parser.setContentHandler(TrackListHandler(self.__db, self.__entry_type, self.__sku_dict, self.__home_dict, self.__art_dict))
self.__catalogue_loader = RB.ChunkLoader()
self.__catalogue_loader.set_callback(catalogue_chunk_cb, parser)
self.__catalogue_loader.start(magnatune_song_info, 64*1024)
self.__catalogue_check = rb.Loader()
self.__catalogue_check.get_url(magnatune_changed_uri, update_cb)
def __show_loading_screen(self, show):
if self.__info_screen is None:
# load the builder stuff
builder = Gtk.Builder()
builder.add_from_file(rb.find_plugin_file(self.props.plugin, "magnatune-loading.ui"))
self.__info_screen = builder.get_object("magnatune_loading_scrolledwindow")
self.pack_start(self.__info_screen, True, True, 0)
self.get_entry_view().set_no_show_all(True)
self.__info_screen.set_no_show_all(True)
self.__info_screen.set_property("visible", show)
self.__paned_box.set_property("visible", not show)
def __notify_status_changed(self):
def change_idle_cb():
self.notify_status_changed()
self.__notify_id = 0
return False
if self.__notify_id == 0:
self.__notify_id = GObject.idle_add(change_idle_cb)
#
# internal purchasing code
#
def __auth_download(self, sku): # http://magnatune.com/info/api
def auth_data_cb(data, (username, password)):
dl_album_handler = DownloadAlbumHandler(self.__settings['format'])
auth_parser = xml.sax.make_parser()
auth_parser.setContentHandler(dl_album_handler)
if data is None:
# hmm.
return
try:
data = data.replace("<br>", "") # get rid of any stray <br> tags that will mess up the parser
data = data.replace(" & ", " & ") # clean up some missing escaping
# print data
auth_parser.feed(data)
auth_parser.close()
# process the URI: add authentication info, quote the filename component for some reason
parsed = urlparse.urlparse(dl_album_handler.url)
netloc = "%s:%s@%s" % (username, password, parsed.hostname)
spath = os.path.split(urllib.url2pathname(parsed.path))
basename = spath[1]
path = urllib.pathname2url(os.path.join(spath[0], urllib.quote(basename)))
authed = (parsed[0], netloc, path) + parsed[3:]
audio_dl_uri = urlparse.urlunparse(authed)
print "download uri for %s is %s" % (sku, audio_dl_uri)
self.__download_album(audio_dl_uri, sku)
except MagnatuneDownloadError, e:
RB.error_dialog(title = _("Download Error"),
message = _("An error occurred while trying to authorize the download.\nThe Magnatune server returned:\n%s") % str(e))
except Exception, e:
sys.excepthook(*sys.exc_info())
RB.error_dialog(title = _("Error"),
message = _("An error occurred while trying to download the album.\nThe error text is:\n%s") % str(e))
print "downloading album: " + sku
account = MagnatuneAccount.instance()
(account_type, username, password) = account.get()
url_dict = {
'id': magnatune_partner_id,
'sku': sku
}
url = magnatune_api_download_uri % (username, password)
url = url + urllib.urlencode(url_dict)
l = rb.Loader()
l.get_url(url, auth_data_cb, (username, password))
def __download_album(self, audio_dl_uri, sku):
def download_progress(copy, complete, total, self):
self.__downloads[audio_dl_uri] = (complete, total)
self.__notify_status_changed()
def download_finished(copy, success, self):
del self.__downloads[audio_dl_uri]
del self.__copies[audio_dl_uri]
print "download of %s finished: %s" % (audio_dl_uri, success)
if success:
threading.Thread(target=unzip_album).start()
else:
remove_download_files()
if len(self.__downloads) == 0: # All downloads are complete
shell = self.props.shell
manager = shell.props.ui_manager
manager.get_action("/MagnatuneSourceViewPopup/MagnatuneCancelDownload").set_sensitive(False)
if success:
shell.notify_custom(4000, _("Finished Downloading"), _("All Magnatune downloads have been completed."), None, False)
self.__notify_status_changed()
def unzip_album():
# just use the first library location
library = Gio.Settings("org.gnome.rhythmbox.rhythmdb")
library_location = Gio.file_new_for_uri(library['locations'][0])
print "unzipping %s" % dest.get_path()
album = zipfile.ZipFile(dest.get_path())
for track in album.namelist():
track_uri = library_location.resolve_relative_path(track).get_uri()
print "zip file entry: %s => %s" % (track, track_uri)
track_uri = RB.sanitize_uri_for_filesystem(track_uri)
RB.uri_create_parent_dirs(track_uri)
track_out = Gio.file_new_for_uri(track_uri).create(Gio.FileCreateFlags.NONE, None)
if track_out is not None:
track_out.write(album.read(track), None)
track_out.close(None)
print "adding %s to library" % track_uri
self.__db.add_uri(track_uri)
album.close()
remove_download_files()
def remove_download_files():
print "removing download files"
in_progress.delete(None)
dest.delete(None)
in_progress = magnatune_in_progress_dir.resolve_relative_path("in_progress_" + sku)
dest = magnatune_in_progress_dir.resolve_relative_path(sku)
in_progress.replace_contents(str(audio_dl_uri),
None,
False,
Gio.FileCreateFlags.PRIVATE|Gio.FileCreateFlags.REPLACE_DESTINATION,
None)
shell = self.props.shell
manager = shell.props.ui_manager
manager.get_action("/MagnatuneSourceViewPopup/MagnatuneCancelDownload").set_sensitive(True)
try:
# For some reason, Gio.FileCopyFlags.OVERWRITE doesn't work for copy_async
dest.delete(None)
except:
pass
dl = RB.AsyncCopy()
dl.set_progress(download_progress, self)
dl.start(audio_dl_uri, dest.get_uri(), download_finished, self)
self.__downloads[audio_dl_uri] = (0, 0) # (current, total)
self.__copies[audio_dl_uri] = dl
def cancel_downloads(self):
for download in self.__copies.values():
download.cancel()
shell = self.props.shell
manager = shell.props.ui_manager
manager.get_action("/MagnatuneSourceViewPopup/MagnatuneCancelDownload").set_sensitive(False)
def playing_entry_changed(self, entry):
if not self.__db or not entry:
return
if entry.get_entry_type() != self.__db.entry_type_get_by_name("MagnatuneEntryType"):
return
sku = self.__sku_dict[entry.get_string(RB.RhythmDBPropType.LOCATION)]
key = RB.ExtDBKey.create_storage("album", entry.get_string(RB.RhythmDBPropType.ALBUM))
key.add_field("artist", entry.get_string(RB.RhythmDBPropType.ARTIST))
self.__art_store.store_uri(key, self.__art_dict[sku])
GObject.type_register(MagnatuneSource)
| gpl-2.0 |
kubernetes-client/python | kubernetes/client/models/v1alpha1_pod_preset_spec.py | 1 | 7114 | # coding: utf-8
"""
Kubernetes
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: release-1.18
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from kubernetes.client.configuration import Configuration
class V1alpha1PodPresetSpec(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    """
    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Maps python attribute name -> declared OpenAPI type string.
    openapi_types = {
        'env': 'list[V1EnvVar]',
        'env_from': 'list[V1EnvFromSource]',
        'selector': 'V1LabelSelector',
        'volume_mounts': 'list[V1VolumeMount]',
        'volumes': 'list[V1Volume]'
    }

    # Maps python attribute name -> JSON key used on the wire.
    attribute_map = {
        'env': 'env',
        'env_from': 'envFrom',
        'selector': 'selector',
        'volume_mounts': 'volumeMounts',
        'volumes': 'volumes'
    }

    def __init__(self, env=None, env_from=None, selector=None, volume_mounts=None, volumes=None, local_vars_configuration=None):  # noqa: E501
        """V1alpha1PodPresetSpec - a model defined in OpenAPI"""  # noqa: E501
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration

        self._env = None
        self._env_from = None
        self._selector = None
        self._volume_mounts = None
        self._volumes = None
        self.discriminator = None

        # Every field is optional; only assign the ones the caller provided
        # so unset fields stay None and are omitted from serialization.
        if env is not None:
            self.env = env
        if env_from is not None:
            self.env_from = env_from
        if selector is not None:
            self.selector = selector
        if volume_mounts is not None:
            self.volume_mounts = volume_mounts
        if volumes is not None:
            self.volumes = volumes

    @property
    def env(self):
        """Gets the env of this V1alpha1PodPresetSpec.  # noqa: E501

        Env defines the collection of EnvVar to inject into containers.  # noqa: E501

        :return: The env of this V1alpha1PodPresetSpec.  # noqa: E501
        :rtype: list[V1EnvVar]
        """
        return self._env

    @env.setter
    def env(self, env):
        """Sets the env of this V1alpha1PodPresetSpec.

        Env defines the collection of EnvVar to inject into containers.  # noqa: E501

        :param env: The env of this V1alpha1PodPresetSpec.  # noqa: E501
        :type: list[V1EnvVar]
        """

        self._env = env

    @property
    def env_from(self):
        """Gets the env_from of this V1alpha1PodPresetSpec.  # noqa: E501

        EnvFrom defines the collection of EnvFromSource to inject into containers.  # noqa: E501

        :return: The env_from of this V1alpha1PodPresetSpec.  # noqa: E501
        :rtype: list[V1EnvFromSource]
        """
        return self._env_from

    @env_from.setter
    def env_from(self, env_from):
        """Sets the env_from of this V1alpha1PodPresetSpec.

        EnvFrom defines the collection of EnvFromSource to inject into containers.  # noqa: E501

        :param env_from: The env_from of this V1alpha1PodPresetSpec.  # noqa: E501
        :type: list[V1EnvFromSource]
        """

        self._env_from = env_from

    @property
    def selector(self):
        """Gets the selector of this V1alpha1PodPresetSpec.  # noqa: E501

        :return: The selector of this V1alpha1PodPresetSpec.  # noqa: E501
        :rtype: V1LabelSelector
        """
        return self._selector

    @selector.setter
    def selector(self, selector):
        """Sets the selector of this V1alpha1PodPresetSpec.

        :param selector: The selector of this V1alpha1PodPresetSpec.  # noqa: E501
        :type: V1LabelSelector
        """

        self._selector = selector

    @property
    def volume_mounts(self):
        """Gets the volume_mounts of this V1alpha1PodPresetSpec.  # noqa: E501

        VolumeMounts defines the collection of VolumeMount to inject into containers.  # noqa: E501

        :return: The volume_mounts of this V1alpha1PodPresetSpec.  # noqa: E501
        :rtype: list[V1VolumeMount]
        """
        return self._volume_mounts

    @volume_mounts.setter
    def volume_mounts(self, volume_mounts):
        """Sets the volume_mounts of this V1alpha1PodPresetSpec.

        VolumeMounts defines the collection of VolumeMount to inject into containers.  # noqa: E501

        :param volume_mounts: The volume_mounts of this V1alpha1PodPresetSpec.  # noqa: E501
        :type: list[V1VolumeMount]
        """

        self._volume_mounts = volume_mounts

    @property
    def volumes(self):
        """Gets the volumes of this V1alpha1PodPresetSpec.  # noqa: E501

        Volumes defines the collection of Volume to inject into the pod.  # noqa: E501

        :return: The volumes of this V1alpha1PodPresetSpec.  # noqa: E501
        :rtype: list[V1Volume]
        """
        return self._volumes

    @volumes.setter
    def volumes(self, volumes):
        """Sets the volumes of this V1alpha1PodPresetSpec.

        Volumes defines the collection of Volume to inject into the pod.  # noqa: E501

        :param volumes: The volumes of this V1alpha1PodPresetSpec.  # noqa: E501
        :type: list[V1Volume]
        """

        self._volumes = volumes

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        # Recursively serialize nested models, and lists/dicts of models.
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, V1alpha1PodPresetSpec):
            return False

        return self.to_dict() == other.to_dict()

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        if not isinstance(other, V1alpha1PodPresetSpec):
            return True

        return self.to_dict() != other.to_dict()
| apache-2.0 |
Krupamani/Shodana | django-admin-sortable/setup.py | 5 | 1054 | from setuptools import setup, find_packages
# Read the long description from README, falling back to None so a checkout
# without the file still installs.  Fixed: the bare ``except:`` also swallowed
# SystemExit/KeyboardInterrupt; only file errors are expected here.
try:
    README = open('README').read()
except (IOError, OSError):
    README = None
# Package metadata; the version is read from the adminsortable package itself.
# NOTE(review): license='APL' but the classifier says Apache Software License --
# confirm which one is intended.
setup(
    name='django-admin-sortable',
    version=__import__('adminsortable').__version__,
    description='Drag and drop sorting for models and inline models in Django admin.',
    long_description=README,
    license='APL',
    author='Brandon Taylor',
    author_email='alsoicode@gmail.com',
    url='https://github.com/iambrandontaylor/django-admin-sortable',
    packages=find_packages(exclude=['sample_project']),
    zip_safe=False,
    include_package_data=True,
    classifiers=['Development Status :: 5 - Production/Stable',
                 'Environment :: Web Environment',
                 'Framework :: Django',
                 'Intended Audience :: Developers',
                 'License :: OSI Approved :: Apache Software License',
                 'Operating System :: OS Independent',
                 'Programming Language :: Python',
                 'Topic :: Utilities'],
    )
| agpl-3.0 |
artursmet/django-registration | setup.py | 27 | 1877 | from distutils.core import setup
import os
from registration import get_version
# Compile the list of packages available, because distutils doesn't have
# an easy way to do this.
# Walk the 'registration' package, collecting importable packages and
# non-Python data files for setup() below.
packages, data_files = [], []
root_dir = os.path.dirname(__file__)
if root_dir:
    os.chdir(root_dir)

for dirpath, dirnames, filenames in os.walk('registration'):
    # Prune hidden directories *in place* so os.walk does not descend into
    # them.  Fixed: the old code deleted entries while enumerating the same
    # list, which skips the element following each deletion and could leave
    # dot-directories in the walk.
    dirnames[:] = [d for d in dirnames if not d.startswith('.')]
    if '__init__.py' in filenames:
        pkg = dirpath.replace(os.path.sep, '.')
        if os.path.altsep:
            pkg = pkg.replace(os.path.altsep, '.')
        packages.append(pkg)
    elif filenames:
        prefix = dirpath[13:] # Strip "registration/" or "registration\"
        for f in filenames:
            data_files.append(os.path.join(prefix, f))
# Package metadata; `packages` and `data_files` were collected by the walk above.
setup(name='django-registration',
      version=get_version().replace(' ', '-'),
      description='An extensible user-registration application for Django',
      author='James Bennett',
      author_email='james@b-list.org',
      url='http://www.bitbucket.org/ubernostrum/django-registration/wiki/',
      download_url='http://www.bitbucket.org/ubernostrum/django-registration/get/v0.8.gz',
      package_dir={'registration': 'registration'},
      packages=packages,
      package_data={'registration': data_files},
      classifiers=['Development Status :: 4 - Beta',
                   'Environment :: Web Environment',
                   'Framework :: Django',
                   'Intended Audience :: Developers',
                   'License :: OSI Approved :: BSD License',
                   'Operating System :: OS Independent',
                   'Programming Language :: Python',
                   'Topic :: Software Development :: Libraries :: Python Modules',
                   'Topic :: Utilities'],
      )
| bsd-3-clause |
gechr/ansible-modules-extras | system/make.py | 44 | 3395 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2015, Linus Unnebäck <linus@folkdatorn.se>
#
# This file is part of Ansible
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: make
short_description: Run targets in a Makefile
requirements: [ make ]
version_added: "2.1"
author: Linus Unnebäck (@LinusU) <linus@folkdatorn.se>
description:
- Run targets in a Makefile.
options:
target:
description:
- The target to run
required: false
default: none
params:
description:
- Any extra parameters to pass to make
required: false
default: none
chdir:
description:
- cd into this directory before running make
required: true
'''
EXAMPLES = '''
# Build the default target
- make: chdir=/home/ubuntu/cool-project
# Run `install` target as root
- make: chdir=/home/ubuntu/cool-project target=install
become: yes
# Pass in extra arguments to build
- make:
chdir: /home/ubuntu/cool-project
target: all
params:
NUM_THREADS: 4
BACKEND: lapack
'''
# TODO: Disabled the RETURN as it was breaking docs building. Someone needs to
# fix this
RETURN = '''# '''
def format_params(params):
    """Render a params dict as make-style ``KEY=value`` strings.

    Fixed: uses ``dict.items()`` instead of the Python-2-only
    ``iteritems()`` so the module runs under both Python 2 and 3.
    """
    return [k + '=' + str(v) for k, v in params.items()]

def push_arguments(cmd, args):
    """Append the make target and any extra KEY=value params to *cmd*.

    Mutates and returns *cmd* so callers can use it inline.  ``is not None``
    replaces the non-idiomatic ``!= None`` comparisons.
    """
    if args['target'] is not None:
        cmd.append(args['target'])
    if args['params'] is not None:
        cmd.extend(format_params(args['params']))
    return cmd
def check_changed(make_path, module, args):
    """Return True when `make --question` reports the target needs rebuilding.

    make exits non-zero from --question when there is work left to do.
    """
    question_cmd = push_arguments([make_path, '--question'], args)
    rc, _, __ = module.run_command(question_cmd, check_rc=False, cwd=args['chdir'])
    return rc != 0

def run_make(make_path, module, args):
    """Invoke make for the configured target; check_rc=True fails the module on error."""
    make_cmd = push_arguments([make_path], args)
    module.run_command(make_cmd, check_rc=True, cwd=args['chdir'])
def main():
    """Entry point: run (or dry-run, in check mode) make for the requested target."""
    module = AnsibleModule(
        supports_check_mode=True,
        argument_spec=dict(
            target=dict(required=False, default=None, type='str'),
            params=dict(required=False, default=None, type='dict'),
            chdir=dict(required=True, default=None, type='str'),
        ),
    )
    # Result dict returned to Ansible; 'changed' is filled in below.
    args = dict(
        changed=False,
        failed=False,
        target=module.params['target'],
        params=module.params['params'],
        chdir=module.params['chdir'],
    )
    # Second argument True: fail the module if make is not installed.
    make_path = module.get_bin_path('make', True)

    # Check if target is up to date
    args['changed'] = check_changed(make_path, module, args)

    # Check only; don't modify
    if module.check_mode:
        module.exit_json(changed=args['changed'])

    # Target is already up to date
    if args['changed'] == False:
        module.exit_json(**args)

    run_make(make_path, module, args)
    module.exit_json(**args)
# Ansible modules of this era import module_utils at the bottom of the file
# by convention (the import is rewritten when the module ships to a target).
from ansible.module_utils.basic import *

if __name__ == '__main__':
    main()
| gpl-3.0 |
justinmeister/spaceinvaders-spyral | game/level.py | 1 | 1501 | import os
import spyral
from .sprites import sprite
from . import collision
# Screen geometry (pixels).
WIDTH = 1200
HEIGHT = 900
WHITE = (255, 255, 255)
SIZE = (WIDTH, HEIGHT)
# Palette (RGB).
GREEN = (60, 179, 113)
RED = (255, 0, 0)
BLACKBLUE = (19, 15, 48)
BG_COLOR = BLACKBLUE
# Alien grid layout and per-step movement (pixels).
ENEMYGAP = 30
XMARGIN = 175
YMARGIN = 100
MOVEX = 15
MOVEY = 20
ENEMYSIDE = 50
# Background image shipped with the game.
BACKGROUND = os.path.join("game", "graphics", "spacebackground.png")
class Level1(spyral.Scene):
    """First level scene: space background, the player ship and a 6x3 alien grid."""

    def __init__(self):
        spyral.Scene.__init__(self, SIZE)
        self.space = spyral.Image(filename=BACKGROUND)
        # NOTE(review): duplicates WIDTH/HEIGHT -- consider scaling to SIZE.
        self.background = self.space.scale((1200, 900))
        self.collision_handler = collision.CollisionHandler(self)
        self.player = sprite.Player(self, 'left', self.collision_handler)
        self.alien_list = self.make_aliens(6, 3)
        # Register the sprites with the collision handler after creation.
        self.collision_handler.add_player(self.player)
        self.collision_handler.add_aliens(self.alien_list)
        # Global event wiring: quit events, the per-frame tick, and 'q' to quit.
        spyral.event.register("system.quit", spyral.director.pop)
        spyral.event.register("director.update", self.update)
        spyral.event.register("input.keyboard.down.q", spyral.director.pop)

    def update(self, delta):
        # Per-frame hook; game objects currently update themselves.
        pass

    def make_aliens(self, columns, rows):
        """
        Create the grid of aliens and return them as a list.

        Registration with the collision handler happens in __init__.
        """
        alien_list = []
        for column in range(columns):
            for row in range(rows):
                alien = sprite.Alien(self, row, column)
                alien_list.append(alien)
        return alien_list
| mit |
nakamura-akifumi/kassis_orange | app_search/helpers/paginate_helper.py | 1 | 1579 | import math
class Paginate:
    """Builds the pagination widget model for search-result pages.

    ``paginate`` returns ``{"paginate": {...}}`` where the inner dict holds
    the visible page-tab ``list`` plus optional ``first``/``last``/
    ``previous``/``next`` links, each present only when that link makes
    sense for the current page.
    """

    def __init__(self, pagetab_count=5, per_page=10):
        # pagetab_count: how many numbered tabs to show at once.
        # per_page: results shown per page.
        # (A stray dead ``pass`` statement was removed from here.)
        self.pagetab_count = pagetab_count
        self.per_page = per_page

    def paginate(self, result_count, current_page):
        """Return the pagination model for *result_count* total results.

        Fixed: the total page count now rounds *up*.  The old
        ``math.floor(result_count / per_page)`` made the final partial page
        unreachable (95 results / 10 per page yielded 9 pages, not 10).
        """
        paginate_list = []
        pagetab_count = self.pagetab_count
        per_page = self.per_page

        # Ceiling division without floats; correct on any Python version.
        max_page = (result_count + per_page - 1) // per_page

        # Choose the half-open window [sp, ep) of page tabs to display.
        if max_page <= pagetab_count:
            # NOTE(review): this window can extend past max_page (e.g. 3
            # pages still shows 5 tabs).  Preserved as-is to keep behaviour,
            # but it looks like it was meant to be sp, ep = 1, max_page + 1
            # -- confirm against the template.
            sp = current_page
            ep = sp + pagetab_count
        elif current_page > 3 and max_page - 2 > current_page:
            sp = current_page - 2
            ep = sp + pagetab_count
        elif current_page <= 3 and max_page > current_page + pagetab_count:
            sp = 1
            ep = sp + pagetab_count
        else:
            sp = max_page - pagetab_count + 1
            ep = max_page + 1

        for p in range(sp, ep):
            entry = {"key": str(p), "display_name": str(p), "current": "0"}
            if p == current_page:
                entry.update({"current": "1"})
            paginate_list.append(entry)

        paginate = {}
        paginate.update({"list": paginate_list})
        if current_page != 1:
            paginate.update({"first": {"key": "1"}})
        if current_page != max_page:
            paginate.update({"last": {"key": str(max_page)}})
        if current_page - 1 > 1:
            paginate.update({"previous": {"key": str(current_page - 1)}})
        if current_page + 1 <= max_page:
            paginate.update({"next": {"key": str(current_page + 1)}})

        return {"paginate": paginate}
| mit |
harryliang/iScript | unzip.py | 22 | 1752 | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
import os
import sys
import zipfile
import argparse
s = '\x1b[%d;%dm%s\x1b[0m' # terminal color template (ANSI SGR escape sequence)
def unzip(path):
    """Extract the archive at *path*, decoding member names as GBK when possible.

    Plain ``unzip`` mangles GBK-encoded (Chinese) member names; decoding them
    here yields readable file names.  Existing files are never overwritten and
    unwritable entries are skipped silently (best effort, as before).

    Relies on the module-global ``args`` set up by main() for the optional
    archive password.
    """
    # Fixed: the handle used to be bound to ``file``, shadowing the Python 2
    # builtin of the same name.
    archive = zipfile.ZipFile(path, "r")
    if args.secret:
        archive.setpassword(args.secret)
    try:
        for name in archive.namelist():
            try:
                utf8name = name.decode('gbk')
            except Exception:
                # Name is not valid GBK (or is already unicode) -- keep as-is.
                utf8name = name
            pathname = os.path.dirname(utf8name)
            if not os.path.exists(pathname) and pathname != "":
                os.makedirs(pathname)
            data = archive.read(name)
            if not os.path.exists(utf8name):
                try:
                    # Fixed: the old code called ``fo.close`` without
                    # parentheses, so the handle was never explicitly closed;
                    # the with-block closes it even on write errors.
                    with open(utf8name, "w") as fo:
                        fo.write(data)
                except Exception:
                    # Best-effort extraction: skip entries we cannot write.
                    pass
    finally:
        archive.close()
def main(argv):
######################################################
# for argparse
p = argparse.ArgumentParser(description='解决unzip乱码')
p.add_argument('xxx', type=str, nargs='*', \
help='命令对象.')
p.add_argument('-s', '--secret', action='store', \
default=None, help='密码')
global args
args = p.parse_args(argv[1:])
xxx = args.xxx
for path in xxx:
if path.endswith('.zip'):
if os.path.exists(path):
print s % (1, 97, ' ++ unzip:'), path
unzip(path)
else:
print s % (1, 91, ' !! file doesn\'t exist.'), path
else:
print s % (1, 91, ' !! file isn\'t a zip file.'), path
if __name__ == '__main__':
argv = sys.argv
main(argv)
| mit |
kisonecat/sequences-and-series | quizzes/quizzes.py | 3 | 12168 | from sage.all import *
from questions import *
import time
################################################################
class Quiz:
    """Base class for all quizzes.

    Subclasses set ``title``, ``preamble`` and ``question_classes``;
    instantiating a quiz instantiates one question per question class.
    """
    question_classes = []
    title = 'Quiz'
    preamble = ''

    def __init__(self):
        self.questions = [question_class() for question_class in self.__class__.question_classes]

    @classmethod
    def forum_list(cls):
        """Print a 1-based numbered list of question titles (for forum posts)."""
        for problem_number, question_class in enumerate(cls.question_classes, start=1):
            print("Q{problem_number}: {title}".format(problem_number=problem_number, title=question_class.title))

    @classmethod
    def coursera_xml(cls):
        """Return the complete Coursera quiz XML for this quiz.

        Each question class contributes one ``<question_group>`` from which a
        single question is selected; the maximum score is 9 points per group.
        """
        header = """<quiz>
<metadata>
<title><![CDATA[{title}]]></title>
<open_time>2012-12-31 1900</open_time>
<soft_close_time>2014-03-10 1611</soft_close_time>
<hard_close_time>2014-04-05 1611</hard_close_time>
<duration>0</duration>
<retry_delay>0</retry_delay>
<maximum_submissions>2</maximum_submissions>
<modified_time>{modified_time}</modified_time>
<is_ace_exam_flag>none</is_ace_exam_flag>
<authentication_required>1</authentication_required>
<parameters>
<show_explanations>
<question>before_soft_close_time</question>
<option>before_soft_close_time</option>
<score>before_soft_close_time</score>
</show_explanations>
</parameters>
<maximum_score>{maximum_score}</maximum_score>
</metadata>
<preamble><![CDATA[{preamble}]]></preamble>
<data><question_groups>
""".format(title=cls.title, maximum_score=9*len(cls.question_classes), modified_time=int(1000*time.time()), preamble=cls.preamble)
        footer = """ </question_groups>
</data>
</quiz>"""
        # FIX: the original used Sage's deprecated ``join(words, sep)`` helper;
        # the stdlib ``str.join`` is equivalent and removes the Sage dependency.
        groups = "\n".join('<question_group select="1">' + question_class.coursera_xml() + '</question_group>'
                           for question_class in cls.question_classes)
        return header + groups + footer
################################################################
# Quiz 1
import evaluateGeometricSeriesFraction
import computeSequenceRecursively
import identifyGeometricProgression
import identifyArithmeticProgression
import identifyMonotonicSequence
import defineSequenceBounded
import defineLimitOfSequence
import findSufficientlyLargeForEpsilon
import findSequenceLimit
import determineSequenceBounded
import applyMonotoneConvergence
################################################################
# Quiz 2
import defineValueOfSeries
import evaluateGeometricSeriesFraction
import evaluateTelescopingSeries
import analyzeGeometricSeries
import applyNthTermTest
import applyComparisonTest
import analyzeHarmonicSeries
################################################################
# Quiz 3
import applyRatioTest
import applyRatioTestWithFactorials
import applyRatioTestWithFactorialsAndPowers
import comparePSeries
import identifyPSeries
import applyRootTest
import boundSeriesByIntegrating
################################################################
# Quiz 4
import relateAbsoluteConvergenceToConvergence
import applyLimitComparisonTest
import applyAlternatingSeriesTest
import identifyAlternatingPSeries
import approximateAlternatingSeries
import approximateLogarithm
import considerNonmonotoneAlternatingSeries
################################################################
# Quiz 5
import findIntervalOfConvergence
import findRadiusOfConvergence
import identifyPowerSeries
import transformGeometricSeries
import multiplyPowerSeries
import differentiateTermByTerm
import approximateErf
################################################################
# Quiz 6
import findBeginningOfTaylorSeries
import substituteTrigonometricTaylorSeries
import approximateTrigFunction
import evaluateLimitWithSeries
import computeTaylorSeriesForPolynomial
import boundFunctionWithTaylorsTheorem
import recallCommonTaylorSeries
################################################################
# Final
import findDerivativeFromTaylorSeries
import approximateArctangent
class SixthQuiz(Quiz):
    # Homework 6 question pool (Taylor series topics); one question per entry.
    title = 'Homework 6'
    preamble = 'This is the last homework! And like all the homeworks in this course, is an example of “formative assessment.” That does not mean it is not worth points, but the points are only there to encourage you to complete it, not to judge you. Please, please, use the hints when you get stuck—you should not just be solving problems, but learning things by completing this homework assignment. Discuss freely on the forums. If you get the right answer but do not understand why, please ask. Take the quiz again and again—the questions will change each time. Feel free to use the provided resources in whatever way helps you to understand the material. You have made it so far in this course, so I know that you can do this last assignment. ~jim'
    question_classes = [
        findBeginningOfTaylorSeries.Question,
        computeTaylorSeriesForPolynomial.Question,
        recallCommonTaylorSeries.Question,
        evaluateLimitWithSeries.Question,
        # boundFunctionWithTaylorsTheorem.Question,
        approximateTrigFunction.Question,
        substituteTrigonometricTaylorSeries.Question,
    ]
class FifthQuiz(Quiz):
    # Homework 5 question pool (power series topics); one question per entry.
    title = 'Homework 5'
    preamble = 'This homework, like all the homeworks in this course, is an example of “formative assessment.” That does not mean it is not worth points, but the points are there to encourage you to complete it, not to judge you. Please, use the hints when you get stuck. Discuss freely on the forums. If you get the right answer but do not understand why, please ask. Take the quiz again and again—the questions will change each time. Feel free to use the provided resources in whatever way helps you to understand the material. I know that you can do it. ~jim'
    question_classes = [
        identifyPowerSeries.Question,
        findIntervalOfConvergence.Question,
        findRadiusOfConvergence.Question,
        differentiateTermByTerm.Question,
        transformGeometricSeries.Question,
        multiplyPowerSeries.Question,
        approximateErf.Question
    ]
class FourthQuiz(Quiz):
    # Homework 4 question pool (alternating series / absolute convergence).
    title = 'Homework 4'
    preamble = 'This homework, like all the homeworks in this course, is an example of “formative assessment.” As such, this homework is not so much about you showing me how much you have learned; the final exam (in just a few weeks now!) will handle that. Rather, this homework is part of the process of learning. Use the hints when you get stuck. Discuss freely on the forums. Take the quiz again and again. Feel free to use the provided resources in whatever way helps you to understand the material. You are making progress on the homework, and with just a few more weeks of work, I know you will do a great job on the final exam. ~jim'
    question_classes = [
        relateAbsoluteConvergenceToConvergence.Question,
        applyLimitComparisonTest.Question,
        identifyAlternatingPSeries.Question,
        applyAlternatingSeriesTest.Question,
        approximateAlternatingSeries.Question,
        approximateLogarithm.Question,
        considerNonmonotoneAlternatingSeries.Question
    ]
class ThirdQuiz(Quiz):
    # Homework 3 question pool (ratio/root tests and p-series).
    title = 'Homework 3'
    preamble = 'This homework, like all the homeworks in this course, is an example of “formative assessment.” As such, this homework is not so much about you showing me how much you have learned; the final exam (in just a few weeks now!) will handle that. Rather, this homework is part of the process of learning. Use the hints when you get stuck. Discuss freely on the forums. Take the quiz again and again. Feel free to use the provided resources in whatever way helps you to understand the material. You are making progress on the homework, and with just a few more weeks of work, I know you will do a great job on the final exam. ~jim'
    question_classes = [
        applyRatioTest.Question,
        applyRatioTestWithFactorials.Question,
        applyRatioTestWithFactorialsAndPowers.Question,
        identifyPSeries.Question,
        comparePSeries.Question,
        applyRootTest.Question,
        boundSeriesByIntegrating.Question,
    ]
class SecondQuiz(Quiz):
    # Homework 2 question pool (series basics and convergence tests).
    title = 'Homework 2'
    preamble = 'This homework, like all the homeworks in this course, is an example of “formative assessment.” As such, this homework is not so much about you showing me how much you have learned; the final exam will handle that. Rather, this homework is part of the process of learning. Use the hints when you get stuck. Discuss freely on the forums. Take the quiz again and again. Feel free to use the provided resources in whatever way helps you to understand the material. I want you to succeed, and, with practice, I know you will. ~jim'
    question_classes = [
        defineValueOfSeries.Question,
        evaluateGeometricSeriesFraction.Question,
        analyzeGeometricSeries.Question,
        evaluateTelescopingSeries.Question,
        applyNthTermTest.Question,
        analyzeHarmonicSeries.Question,
        applyComparisonTest.Question,
    ]
class FirstQuiz(Quiz):
    # Homework 1 question pool (sequences: limits, boundedness, monotonicity).
    title = 'Homework 1'
    preamble = 'This homework, like all the homeworks in this course, is an example of “formative assessment.” As such, this homework is not so much about you showing me how much you have learned; the final exam will handle that. Rather, this homework is part of the process of learning. Use the hints when you get stuck. Discuss freely on the forums. Take the quiz again and again. Feel free to use the provided resources in whatever way helps you to understand the material. I want you to succeed, and, with practice, I know you will. ~jim'
    question_classes = [
        computeSequenceRecursively.Question,
        defineLimitOfSequence.Question,
        findSequenceLimit.Question,
        findSufficientlyLargeForEpsilon.Question,
        defineSequenceBounded.Question,
        determineSequenceBounded.Question,
        identifyArithmeticProgression.Question,
        identifyGeometricProgression.Question,
        identifyMonotonicSequence.Question,
        applyMonotoneConvergence.Question,
    ]
class TheFinalExam(Quiz):
    # Final exam pool: 20 active entries (one is commented out), which at
    # 9 points each gives the 180 points promised in the preamble.
    title = 'Final Exam'
    preamble = 'This is the final exam. You can attempt it twice. This final exam is worth 180 points, so each of the 20 questions is worth 9 points. You will find that there are no hints available, but there are links to lecture videos and to the forums. Please feel free to discuss the ideas behind the problems, to rewatch lecture videos, to reread the textbook, but please do not post or share solutions until November 16. I would like everyone to have a chance to attempt the problems on their own.'
    question_classes = [
        defineLimitOfSequence.Question,
        defineValueOfSeries.Question,
        identifyMonotonicSequence.Question,
        determineSequenceBounded.Question,
        evaluateGeometricSeriesFraction.Question,
        comparePSeries.Question,
        evaluateTelescopingSeries.Question,
        analyzeHarmonicSeries.Question,
        relateAbsoluteConvergenceToConvergence.Question,
        identifyAlternatingPSeries.Question,
        applyRatioTestWithFactorialsAndPowers.Question,
        applyRootTest.Question,
        findIntervalOfConvergence.Question,
        findRadiusOfConvergence.Question,
        transformGeometricSeries.Question,
        findBeginningOfTaylorSeries.Question,
        computeTaylorSeriesForPolynomial.Question,
        # boundFunctionWithTaylorsTheorem.Question,
        evaluateLimitWithSeries.Question,
        findDerivativeFromTaylorSeries.Question,
        approximateArctangent.Question,
    ]
#f = open('final.xml','w')
#f.write(TheFinalExam.coursera_xml())
#f.close()
# Render one question's Ximera/LaTeX source to disk for inspection.
# A context manager guarantees the file is closed even if ximera() raises.
with open('question.tex', 'w') as f:
    f.write(analyzeGeometricSeries.Question.ximera())
# defineValueOfSeries
# evaluateGeometricSeriesFraction
# evaluateTelescopingSeries
# analyzeGeometricSeries
# applyNthTermTest
# applyComparisonTest
# analyzeHarmonicSeries
#TheFinalExam.forum_list()
| gpl-3.0 |
taigaio/taiga-ncurses | taiga_ncurses/ui/widgets/wiki.py | 3 | 2116 | # -*- coding: utf-8 -*-
"""
taiga_ncurses.ui.widgets.wiki
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
"""
import urwid
from taiga_ncurses import data
from . import generic
class WikiPage(urwid.WidgetWrap):
    """Urwid widget showing a project wiki page with a page selector and editor."""

    # Callback fired when the user selects a different page in the combo box.
    # None by default; presumably assigned by the owner of this widget — TODO confirm.
    on_wiki_page_change = None

    def __init__(self, project):
        """Build an initially empty widget for *project* ("No page found" placeholder)."""
        self.project = project
        self.widget = urwid.Pile([generic.ListText("No page found")])
        super().__init__(self.widget)

    def populate(self, wiki_pages, wiki_page):
        """Fill the widget with *wiki_page*'s content plus a selector over *wiki_pages*.

        ``wiki_pages`` must be non-empty (the max() over slugs would raise otherwise).
        """
        # Combo-box entries: (slug, page) pairs; the shown page is preselected.
        items = tuple((data.slug(p), p) for p in wiki_pages)
        selected = wiki_page
        pages_combo = generic.ComboBox(items, selected_value=selected, style="cyan",
                                       on_state_change=self.on_wiki_page_change)
        # Combo width: widest slug plus 8 columns of padding/decoration.
        page_combo_size = max([len(p["slug"]) for p in wiki_pages]) + 8

        # Multiline editor holding the raw page content; tabs are inserted literally.
        content_widget = urwid.Edit(edit_text=data.content(wiki_page), multiline=True, wrap='any',
                                    allow_tab=True)

        # Pile layout: spacer / page selector / spacer / editor / spacer / buttons / spacer.
        self.widget.contents = [
            (generic.RowDivider(div_char=" "), ("weight", 0.1)),
            (urwid.Padding(urwid.LineBox(pages_combo), "center", page_combo_size, 10, 0, 0), ('weight', 1)),
            (generic.RowDivider(div_char=" "), ("weight", 0.1)),
            (content_widget, ('pack', None)),
            (generic.RowDivider(div_char=" "), ("weight", 0.1)),
            (urwid.Padding(self._buttons(), right=2, left=2), ('weight', 1)),
            (generic.RowDivider(div_char=" "), ("weight", 0.1))
        ]
        # Focus the editable content widget (index 3 in the contents list above).
        self.widget.contents.focus = 3

    def _buttons(self):
        """Return a right-aligned row containing the Save and Reset buttons."""
        self.save_button = generic.PlainButton("Save")
        self.reset_button = generic.PlainButton("Reset")

        # Leading flexible spacer pushes the fixed-width buttons to the right.
        colum_items = [("weight", 1, urwid.Text(""))]
        colum_items.append((15, urwid.AttrMap(urwid.Padding(self.save_button, right=2, left=2),
                                              "submit-button") ))
        colum_items.append((2, urwid.Text(" ")))
        colum_items.append((15, urwid.AttrMap(urwid.Padding(self.reset_button, right=1, left=2),
                                              "cancel-button") ))
        return urwid.Columns(colum_items)
| apache-2.0 |
we350z/googlePythonClassSolutions | babynames/babynames.py | 112 | 1886 | #!/usr/bin/python
# Copyright 2010 Google Inc.
# Licensed under the Apache License, Version 2.0
# http://www.apache.org/licenses/LICENSE-2.0
# Google's Python Class
# http://code.google.com/edu/languages/google-python-class/
import sys
import re
"""Baby Names exercise
Define the extract_names() function below and change main()
to call it.
For writing regex, it's nice to include a copy of the target
text for inspiration.
Here's what the html looks like in the baby.html files:
...
<h3 align="center">Popularity in 1990</h3>
....
<tr align="right"><td>1</td><td>Michael</td><td>Jessica</td>
<tr align="right"><td>2</td><td>Christopher</td><td>Ashley</td>
<tr align="right"><td>3</td><td>Matthew</td><td>Brittany</td>
...
Suggested milestones for incremental development:
-Extract the year and print it
-Extract the names and rank numbers and just print them
-Get the names data into a dict and print it
-Build the [year, 'name rank', ... ] list and print it
-Fix main() to use the extract_names list
"""
def extract_names(filename):
  """
  Given a file name for baby.html, returns a list starting with the year string
  followed by the name-rank strings in alphabetical order.
  ['2006', 'Aaliyah 91', 'Aaron 57', 'Abagail 895', ...]
  """
  names = []
  with open(filename) as f:
    text = f.read()

  # The year appears once, in the <h3> heading shown in the module docstring.
  year_match = re.search(r'Popularity\sin\s(\d\d\d\d)', text)
  if not year_match:
    # Unexpected input -- fail hard rather than return partial data.
    sys.stderr.write('Couldn\'t find the year!\n')
    sys.exit(1)
  names.append(year_match.group(1))

  # Each ranking row looks like:
  # <tr align="right"><td>1</td><td>Michael</td><td>Jessica</td>
  tuples = re.findall(r'<td>(\d+)</td><td>(\w+)</td><td>(\w+)</td>', text)

  # Map name -> rank, keeping the first (best) rank if a name occurs for
  # both the boy and girl columns.
  rank_by_name = {}
  for rank, boyname, girlname in tuples:
    if boyname not in rank_by_name:
      rank_by_name[boyname] = rank
    if girlname not in rank_by_name:
      rank_by_name[girlname] = rank

  for name in sorted(rank_by_name):
    names.append(name + ' ' + rank_by_name[name])
  return names
def main():
# This command-line parsing code is provided.
# Make a list of command line arguments, omitting the [0] element
# which is the script itself.
args = sys.argv[1:]
if not args:
print 'usage: [--summaryfile] file [file ...]'
sys.exit(1)
# Notice the summary flag and remove it from args if it is present.
summary = False
if args[0] == '--summaryfile':
summary = True
del args[0]
# +++your code here+++
# For each filename, get the names, then either print the text output
# or write it to a summary file
if __name__ == '__main__':
main()
| apache-2.0 |
gauribhoite/personfinder | env/google_appengine/lib/django-1.4/django/contrib/auth/management/commands/changepassword.py | 97 | 1881 | import getpass
from optparse import make_option
from django.core.management.base import BaseCommand, CommandError
from django.contrib.auth.models import User
from django.db import DEFAULT_DB_ALIAS
class Command(BaseCommand):
    """Management command that interactively changes a user's password."""

    option_list = BaseCommand.option_list + (
        make_option('--database', action='store', dest='database',
            default=DEFAULT_DB_ALIAS, help='Specifies the database to use. Default is "default".'),
    )
    help = "Change a user's password for django.contrib.auth."
    requires_model_validation = False

    def _get_pass(self, prompt="Password: "):
        """Prompt for a password without echoing; abort on empty input."""
        entered = getpass.getpass(prompt=prompt)
        if not entered:
            raise CommandError("aborted")
        return entered

    def handle(self, *args, **options):
        if len(args) > 1:
            raise CommandError("need exactly one or zero arguments for username")

        # Default to the OS-level user when no username was given.
        username = args[0] if args else getpass.getuser()

        try:
            user = User.objects.using(options.get('database')).get(username=username)
        except User.DoesNotExist:
            raise CommandError("user '%s' does not exist" % username)

        self.stdout.write("Changing password for user '%s'\n" % user.username)

        MAX_TRIES = 3
        count = 0
        p1, p2 = 1, 2  # distinct sentinels so the loop runs at least once
        while p1 != p2 and count < MAX_TRIES:
            p1 = self._get_pass()
            p2 = self._get_pass("Password (again): ")
            if p1 != p2:
                self.stdout.write("Passwords do not match. Please try again.\n")
                count = count + 1

        if count == MAX_TRIES:
            raise CommandError("Aborting password change for user '%s' after %s attempts" % (username, count))

        user.set_password(p1)
        user.save()

        return "Password changed successfully for user '%s'" % user.username
| apache-2.0 |
AuyaJackie/odoo | addons/crm/wizard/__init__.py | 377 | 1169 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import crm_partner_binding
import crm_phonecall_to_phonecall
import crm_lead_to_opportunity
import crm_merge_opportunities
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
willemt/docopt2ragel | setup.py | 1 | 1202 | from setuptools import setup, find_packages
import codecs
from os import path
here = path.abspath(path.dirname(__file__))
def long_description():
with codecs.open('README.rst', encoding='utf8') as f:
return f.read()
setup(
name='docopt2ragel',
version='0.1.3',
description='Convert your docopt usage text into a Ragel FSM',
long_description=long_description(),
# The project's main homepage.
url='https://github.com/willemt/docopt2ragel',
author='willemt',
author_email='himself@willemthiart.com',
license='BSD',
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'Topic :: System :: Logging',
'License :: OSI Approved :: BSD License',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
],
keywords='development',
packages=find_packages(exclude=['contrib', 'docs', 'tests*']),
install_requires=['docopt'],
include_package_data=True,
package_data={
'': ['template.rl']
},
entry_points={
'console_scripts': [
'docopt2ragel = docopt2ragel.__main__:main',
],
},
)
| bsd-3-clause |
ttglennhall/DjangoGirlsTutorial | myvenv/lib/python3.4/site-packages/django/db/models/options.py | 30 | 35264 | from __future__ import unicode_literals
import warnings
from bisect import bisect
from collections import OrderedDict, defaultdict
from itertools import chain
from django.apps import apps
from django.conf import settings
from django.core.exceptions import FieldDoesNotExist
from django.db.models.fields import AutoField
from django.db.models.fields.proxy import OrderWrt
from django.db.models.fields.related import ManyToManyField
from django.utils import six
from django.utils.datastructures import ImmutableList, OrderedSet
from django.utils.deprecation import RemovedInDjango20Warning
from django.utils.encoding import (
force_text, python_2_unicode_compatible, smart_text,
)
from django.utils.functional import cached_property
from django.utils.lru_cache import lru_cache
from django.utils.text import camel_case_to_spaces
from django.utils.translation import override, string_concat
PROXY_PARENTS = object()
EMPTY_RELATION_TREE = tuple()
IMMUTABLE_WARNING = (
"The return type of '%s' should never be mutated. If you want to manipulate this list "
"for your own use, make a copy first."
)
DEFAULT_NAMES = ('verbose_name', 'verbose_name_plural', 'db_table', 'ordering',
'unique_together', 'permissions', 'get_latest_by',
'order_with_respect_to', 'app_label', 'db_tablespace',
'abstract', 'managed', 'proxy', 'swappable', 'auto_created',
'index_together', 'apps', 'default_permissions',
'select_on_save', 'default_related_name')
class raise_deprecation(object):
def __init__(self, suggested_alternative):
self.suggested_alternative = suggested_alternative
def __call__(self, fn):
def wrapper(*args, **kwargs):
warnings.warn(
"'%s is an unofficial API that has been deprecated. "
"You may be able to replace it with '%s'" % (
fn.__name__,
self.suggested_alternative,
),
RemovedInDjango20Warning, stacklevel=2
)
return fn(*args, **kwargs)
return wrapper
def normalize_together(option_together):
"""
option_together can be either a tuple of tuples, or a single
tuple of two strings. Normalize it to a tuple of tuples, so that
calling code can uniformly expect that.
"""
try:
if not option_together:
return ()
if not isinstance(option_together, (tuple, list)):
raise TypeError
first_element = next(iter(option_together))
if not isinstance(first_element, (tuple, list)):
option_together = (option_together,)
# Normalize everything to tuples
return tuple(tuple(ot) for ot in option_together)
except TypeError:
# If the value of option_together isn't valid, return it
# verbatim; this will be picked up by the check framework later.
return option_together
def make_immutable_fields_list(name, data):
return ImmutableList(data, warning=IMMUTABLE_WARNING % name)
@python_2_unicode_compatible
class Options(object):
FORWARD_PROPERTIES = ('fields', 'many_to_many', 'concrete_fields',
'local_concrete_fields', '_forward_fields_map')
REVERSE_PROPERTIES = ('related_objects', 'fields_map', '_relation_tree')
def __init__(self, meta, app_label=None):
self._get_fields_cache = {}
self.proxied_children = []
self.local_fields = []
self.local_many_to_many = []
self.virtual_fields = []
self.model_name = None
self.verbose_name = None
self.verbose_name_plural = None
self.db_table = ''
self.ordering = []
self.unique_together = []
self.index_together = []
self.select_on_save = False
self.default_permissions = ('add', 'change', 'delete')
self.permissions = []
self.object_name = None
self.app_label = app_label
self.get_latest_by = None
self.order_with_respect_to = None
self.db_tablespace = settings.DEFAULT_TABLESPACE
self.meta = meta
self.pk = None
self.has_auto_field = False
self.auto_field = None
self.abstract = False
self.managed = True
self.proxy = False
# For any class that is a proxy (including automatically created
# classes for deferred object loading), proxy_for_model tells us
# which class this model is proxying. Note that proxy_for_model
# can create a chain of proxy models. For non-proxy models, the
# variable is always None.
self.proxy_for_model = None
# For any non-abstract class, the concrete class is the model
# in the end of the proxy_for_model chain. In particular, for
# concrete models, the concrete_model is always the class itself.
self.concrete_model = None
self.swappable = None
self.parents = OrderedDict()
self.auto_created = False
# To handle various inheritance situations, we need to track where
# managers came from (concrete or abstract base classes). `managers`
# keeps a list of 3-tuples of the form:
# (creation_counter, instance, abstract(=True))
self.managers = []
# List of all lookups defined in ForeignKey 'limit_choices_to' options
# from *other* models. Needed for some admin checks. Internal use only.
self.related_fkey_lookups = []
# A custom app registry to use, if you're making a separate model set.
self.apps = apps
self.default_related_name = None
@lru_cache(maxsize=None)
def _map_model(self, link):
# This helper function is used to allow backwards compatibility with
# the previous API. No future methods should use this function.
# It maps a field to (field, model or related_model,) depending on the
# field type.
model = link.model._meta.concrete_model
if model is self.model:
model = None
return link, model
@lru_cache(maxsize=None)
def _map_model_details(self, link):
# This helper function is used to allow backwards compatibility with
# the previous API. No future methods should use this function.
# This function maps a field to a tuple of:
# (field, model or related_model, direct, is_m2m) depending on the
# field type.
direct = not link.auto_created or link.concrete
model = link.model._meta.concrete_model
if model is self.model:
model = None
m2m = link.is_relation and link.many_to_many
return link, model, direct, m2m
@property
def app_config(self):
# Don't go through get_app_config to avoid triggering imports.
return self.apps.app_configs.get(self.app_label)
@property
def installed(self):
return self.app_config is not None
@property
def abstract_managers(self):
return [
(counter, instance.name, instance) for counter, instance, abstract
in self.managers if abstract
]
@property
def concrete_managers(self):
return [
(counter, instance.name, instance) for counter, instance, abstract
in self.managers if not abstract
]
def contribute_to_class(self, cls, name):
from django.db import connection
from django.db.backends.utils import truncate_name
cls._meta = self
self.model = cls
# First, construct the default values for these options.
self.object_name = cls.__name__
self.model_name = self.object_name.lower()
self.verbose_name = camel_case_to_spaces(self.object_name)
# Store the original user-defined values for each option,
# for use when serializing the model definition
self.original_attrs = {}
# Next, apply any overridden values from 'class Meta'.
if self.meta:
meta_attrs = self.meta.__dict__.copy()
for name in self.meta.__dict__:
# Ignore any private attributes that Django doesn't care about.
# NOTE: We can't modify a dictionary's contents while looping
# over it, so we loop over the *original* dictionary instead.
if name.startswith('_'):
del meta_attrs[name]
for attr_name in DEFAULT_NAMES:
if attr_name in meta_attrs:
setattr(self, attr_name, meta_attrs.pop(attr_name))
self.original_attrs[attr_name] = getattr(self, attr_name)
elif hasattr(self.meta, attr_name):
setattr(self, attr_name, getattr(self.meta, attr_name))
self.original_attrs[attr_name] = getattr(self, attr_name)
ut = meta_attrs.pop('unique_together', self.unique_together)
self.unique_together = normalize_together(ut)
it = meta_attrs.pop('index_together', self.index_together)
self.index_together = normalize_together(it)
# verbose_name_plural is a special case because it uses a 's'
# by default.
if self.verbose_name_plural is None:
self.verbose_name_plural = string_concat(self.verbose_name, 's')
# Any leftover attributes must be invalid.
if meta_attrs != {}:
raise TypeError("'class Meta' got invalid attribute(s): %s" % ','.join(meta_attrs.keys()))
else:
self.verbose_name_plural = string_concat(self.verbose_name, 's')
del self.meta
# If the db_table wasn't provided, use the app_label + model_name.
if not self.db_table:
self.db_table = "%s_%s" % (self.app_label, self.model_name)
self.db_table = truncate_name(self.db_table, connection.ops.max_name_length())
def _prepare(self, model):
if self.order_with_respect_to:
# The app registry will not be ready at this point, so we cannot
# use get_field().
query = self.order_with_respect_to
try:
self.order_with_respect_to = next(
f for f in self._get_fields(reverse=False)
if f.name == query or f.attname == query
)
except StopIteration:
raise FieldDoesNotExist('%s has no field named %r' % (self.object_name, query))
self.ordering = ('_order',)
if not any(isinstance(field, OrderWrt) for field in model._meta.local_fields):
model.add_to_class('_order', OrderWrt())
else:
self.order_with_respect_to = None
if self.pk is None:
if self.parents:
# Promote the first parent link in lieu of adding yet another
# field.
field = next(six.itervalues(self.parents))
# Look for a local field with the same name as the
# first parent link. If a local field has already been
# created, use it instead of promoting the parent
already_created = [fld for fld in self.local_fields if fld.name == field.name]
if already_created:
field = already_created[0]
field.primary_key = True
self.setup_pk(field)
else:
auto = AutoField(verbose_name='ID', primary_key=True,
auto_created=True)
model.add_to_class('id', auto)
def add_field(self, field, virtual=False):
# Insert the given field in the order in which it was created, using
# the "creation_counter" attribute of the field.
# Move many-to-many related fields from self.fields into
# self.many_to_many.
if virtual:
self.virtual_fields.append(field)
elif field.is_relation and field.many_to_many:
self.local_many_to_many.insert(bisect(self.local_many_to_many, field), field)
else:
self.local_fields.insert(bisect(self.local_fields, field), field)
self.setup_pk(field)
# If the field being added is a relation to another known field,
# expire the cache on this field and the forward cache on the field
# being referenced, because there will be new relationships in the
# cache. Otherwise, expire the cache of references *to* this field.
# The mechanism for getting at the related model is slightly odd -
# ideally, we'd just ask for field.related_model. However, related_model
# is a cached property, and all the models haven't been loaded yet, so
# we need to make sure we don't cache a string reference.
if field.is_relation and hasattr(field.rel, 'to') and field.rel.to:
try:
field.rel.to._meta._expire_cache(forward=False)
except AttributeError:
pass
self._expire_cache()
else:
self._expire_cache(reverse=False)
def setup_pk(self, field):
if not self.pk and field.primary_key:
self.pk = field
field.serialize = False
def setup_proxy(self, target):
"""
Does the internal setup so that the current model is a proxy for
"target".
"""
self.pk = target._meta.pk
self.proxy_for_model = target
self.db_table = target._meta.db_table
def __repr__(self):
return '<Options for %s>' % self.object_name
def __str__(self):
return "%s.%s" % (smart_text(self.app_label), smart_text(self.model_name))
@property
def verbose_name_raw(self):
"""
There are a few places where the untranslated verbose name is needed
(so that we get the same value regardless of currently active
locale).
"""
with override(None):
return force_text(self.verbose_name)
@property
def swapped(self):
"""
Has this model been swapped out for another? If so, return the model
name of the replacement; otherwise, return None.
For historical reasons, model name lookups using get_model() are
case insensitive, so we make sure we are case insensitive here.
"""
if self.swappable:
model_label = '%s.%s' % (self.app_label, self.model_name)
swapped_for = getattr(settings, self.swappable, None)
if swapped_for:
try:
swapped_label, swapped_object = swapped_for.split('.')
except ValueError:
# setting not in the format app_label.model_name
# raising ImproperlyConfigured here causes problems with
# test cleanup code - instead it is raised in get_user_model
# or as part of validation.
return swapped_for
if '%s.%s' % (swapped_label, swapped_object.lower()) not in (None, model_label):
return swapped_for
return None
@cached_property
def fields(self):
"""
Returns a list of all forward fields on the model and its parents,
excluding ManyToManyFields.
Private API intended only to be used by Django itself; get_fields()
combined with filtering of field properties is the public API for
obtaining this field list.
"""
# For legacy reasons, the fields property should only contain forward
# fields that are not virtual or with a m2m cardinality. Therefore we
# pass these three filters as filters to the generator.
# The third lambda is a longwinded way of checking f.related_model - we don't
# use that property directly because related_model is a cached property,
# and all the models may not have been loaded yet; we don't want to cache
# the string reference to the related_model.
is_not_an_m2m_field = lambda f: not (f.is_relation and f.many_to_many)
is_not_a_generic_relation = lambda f: not (f.is_relation and f.one_to_many)
is_not_a_generic_foreign_key = lambda f: not (
f.is_relation and f.many_to_one and not (hasattr(f.rel, 'to') and f.rel.to)
)
return make_immutable_fields_list(
"fields",
(f for f in self._get_fields(reverse=False) if
is_not_an_m2m_field(f) and is_not_a_generic_relation(f)
and is_not_a_generic_foreign_key(f))
)
@cached_property
def concrete_fields(self):
"""
Returns a list of all concrete fields on the model and its parents.
Private API intended only to be used by Django itself; get_fields()
combined with filtering of field properties is the public API for
obtaining this field list.
"""
return make_immutable_fields_list(
"concrete_fields", (f for f in self.fields if f.concrete)
)
@cached_property
def local_concrete_fields(self):
"""
Returns a list of all concrete fields on the model.
Private API intended only to be used by Django itself; get_fields()
combined with filtering of field properties is the public API for
obtaining this field list.
"""
return make_immutable_fields_list(
"local_concrete_fields", (f for f in self.local_fields if f.concrete)
)
@raise_deprecation(suggested_alternative="get_fields()")
def get_fields_with_model(self):
return [self._map_model(f) for f in self.get_fields()]
@raise_deprecation(suggested_alternative="get_fields()")
def get_concrete_fields_with_model(self):
return [self._map_model(f) for f in self.concrete_fields]
@cached_property
def many_to_many(self):
"""
Returns a list of all many to many fields on the model and its parents.
Private API intended only to be used by Django itself; get_fields()
combined with filtering of field properties is the public API for
obtaining this list.
"""
return make_immutable_fields_list(
"many_to_many",
(f for f in self._get_fields(reverse=False)
if f.is_relation and f.many_to_many)
)
@cached_property
def related_objects(self):
"""
Returns all related objects pointing to the current model. The related
objects can come from a one-to-one, one-to-many, or many-to-many field
relation type.
Private API intended only to be used by Django itself; get_fields()
combined with filtering of field properties is the public API for
obtaining this field list.
"""
all_related_fields = self._get_fields(forward=False, reverse=True, include_hidden=True)
return make_immutable_fields_list(
"related_objects",
(obj for obj in all_related_fields
if not obj.hidden or obj.field.many_to_many)
)
@raise_deprecation(suggested_alternative="get_fields()")
def get_m2m_with_model(self):
return [self._map_model(f) for f in self.many_to_many]
@cached_property
def _forward_fields_map(self):
res = {}
fields = self._get_fields(reverse=False)
for field in fields:
res[field.name] = field
# Due to the way Django's internals work, get_field() should also
# be able to fetch a field by attname. In the case of a concrete
# field with relation, includes the *_id name too
try:
res[field.attname] = field
except AttributeError:
pass
return res
@cached_property
def fields_map(self):
res = {}
fields = self._get_fields(forward=False, include_hidden=True)
for field in fields:
res[field.name] = field
# Due to the way Django's internals work, get_field() should also
# be able to fetch a field by attname. In the case of a concrete
# field with relation, includes the *_id name too
try:
res[field.attname] = field
except AttributeError:
pass
return res
    def get_field(self, field_name, many_to_many=None):
        """
        Returns a field instance given a field name. The field can be either a
        forward or reverse field, unless many_to_many is specified; if it is,
        only forward fields will be returned.

        The many_to_many argument exists for backwards compatibility reasons;
        it has been deprecated and will be removed in Django 2.0.

        Raises FieldDoesNotExist when no field with the given name exists.
        """
        m2m_in_kwargs = many_to_many is not None
        if m2m_in_kwargs:
            # Always throw a warning if many_to_many is used regardless of
            # whether it alters the return type or not.
            warnings.warn(
                "The 'many_to_many' argument on get_field() is deprecated; "
                "use a filter on field.many_to_many instead.",
                RemovedInDjango20Warning
            )
        try:
            # In order to avoid premature loading of the relation tree
            # (expensive) we prefer checking if the field is a forward field.
            field = self._forward_fields_map[field_name]
            if many_to_many is False and field.many_to_many:
                raise FieldDoesNotExist(
                    '%s has no field named %r' % (self.object_name, field_name)
                )
            return field
        except KeyError:
            # If the app registry is not ready, reverse fields are
            # unavailable, therefore we throw a FieldDoesNotExist exception.
            if not self.apps.models_ready:
                raise FieldDoesNotExist(
                    "%s has no field named %r. The app cache isn't ready yet, "
                    "so if this is an auto-created related field, it won't "
                    "be available yet." % (self.object_name, field_name)
                )
            try:
                if m2m_in_kwargs:
                    # Previous API does not allow searching reverse fields.
                    raise FieldDoesNotExist('%s has no field named %r' % (self.object_name, field_name))
                # Retrieve field instance by name from cached or just-computed
                # field map.
                return self.fields_map[field_name]
            except KeyError:
                raise FieldDoesNotExist('%s has no field named %r' % (self.object_name, field_name))
@raise_deprecation(suggested_alternative="get_field()")
def get_field_by_name(self, name):
return self._map_model_details(self.get_field(name))
@raise_deprecation(suggested_alternative="get_fields()")
def get_all_field_names(self):
names = set()
fields = self.get_fields()
for field in fields:
# For backwards compatibility GenericForeignKey should not be
# included in the results.
if field.is_relation and field.many_to_one and field.related_model is None:
continue
# Relations to child proxy models should not be included.
if (field.model != self.model and
field.model._meta.concrete_model == self.concrete_model):
continue
names.add(field.name)
if hasattr(field, 'attname'):
names.add(field.attname)
return list(names)
    @raise_deprecation(suggested_alternative="get_fields()")
    def get_all_related_objects(self, local_only=False, include_hidden=False,
                                include_proxy_eq=False):
        # Deprecated accessor returning incoming non-m2m relations.
        # local_only=True restricts the parent walk to the proxy chain.
        include_parents = True if local_only is False else PROXY_PARENTS
        fields = self._get_fields(
            forward=False, reverse=True,
            include_parents=include_parents,
            include_hidden=include_hidden,
        )
        fields = (obj for obj in fields if not isinstance(obj.field, ManyToManyField))
        if include_proxy_eq:
            # Also surface relations declared on proxy-equivalent models,
            # taken from each proxied child's cached relation tree.
            children = chain.from_iterable(c._relation_tree
                for c in self.concrete_model._meta.proxied_children
                if c is not self)
            relations = (f.rel for f in children
                if include_hidden or not f.rel.field.rel.is_hidden())
            fields = chain(fields, relations)
        return list(fields)
@raise_deprecation(suggested_alternative="get_fields()")
def get_all_related_objects_with_model(self, local_only=False, include_hidden=False,
include_proxy_eq=False):
return [
self._map_model(f) for f in self.get_all_related_objects(
local_only=local_only,
include_hidden=include_hidden,
include_proxy_eq=include_proxy_eq,
)
]
@raise_deprecation(suggested_alternative="get_fields()")
def get_all_related_many_to_many_objects(self, local_only=False):
include_parents = True if local_only is not True else PROXY_PARENTS
fields = self._get_fields(
forward=False, reverse=True,
include_parents=include_parents, include_hidden=True
)
return [obj for obj in fields if isinstance(obj.field, ManyToManyField)]
@raise_deprecation(suggested_alternative="get_fields()")
def get_all_related_m2m_objects_with_model(self):
fields = self._get_fields(forward=False, reverse=True, include_hidden=True)
return [self._map_model(obj) for obj in fields if isinstance(obj.field, ManyToManyField)]
def get_base_chain(self, model):
"""
Returns a list of parent classes leading to 'model' (order from closet
to most distant ancestor). This has to handle the case were 'model' is
a grandparent or even more distant relation.
"""
if not self.parents:
return None
if model in self.parents:
return [model]
for parent in self.parents:
res = parent._meta.get_base_chain(model)
if res:
res.insert(0, parent)
return res
return None
def get_parent_list(self):
"""
Returns all the ancestors of this model as a list ordered by MRO.
Useful for determining if something is an ancestor, regardless of lineage.
"""
result = OrderedSet(self.parents)
for parent in self.parents:
for ancestor in parent._meta.get_parent_list():
result.add(ancestor)
return list(result)
def get_ancestor_link(self, ancestor):
"""
Returns the field on the current model which points to the given
"ancestor". This is possible an indirect link (a pointer to a parent
model, which points, eventually, to the ancestor). Used when
constructing table joins for model inheritance.
Returns None if the model isn't an ancestor of this one.
"""
if ancestor in self.parents:
return self.parents[ancestor]
for parent in self.parents:
# Tries to get a link field from the immediate parent
parent_link = parent._meta.get_ancestor_link(ancestor)
if parent_link:
# In case of a proxied model, the first link
# of the chain to the ancestor is that parent
# links
return self.parents[parent] or parent_link
    def _populate_directed_relation_graph(self):
        """
        This method is used by each model to find its reverse objects. As this
        method is very expensive and is accessed frequently (it looks up every
        field in a model, in every app), it is computed on first access and then
        is set as a property on every model.

        Returns this model's own relation tree (the list of fields on other
        models that point to it), falling back to EMPTY_RELATION_TREE.
        """
        related_objects_graph = defaultdict(list)
        all_models = self.apps.get_models(include_auto_created=True)
        for model in all_models:
            # Abstract model's fields are copied to child models, hence we will
            # see the fields from the child models.
            if model._meta.abstract:
                continue
            fields_with_relations = (
                f for f in model._meta._get_fields(reverse=False, include_parents=False)
                if f.is_relation and f.related_model is not None
            )
            for f in fields_with_relations:
                # Unresolved string references ("app.Model") cannot be mapped
                # onto a concrete Options instance yet, so they are skipped.
                if not isinstance(f.rel.to, six.string_types):
                    related_objects_graph[f.rel.to._meta].append(f)
        for model in all_models:
            # Set the relation_tree using the internal __dict__. In this way
            # we avoid calling the cached property. In attribute lookup,
            # __dict__ takes precedence over a data descriptor (such as
            # @cached_property). This means that the _meta._relation_tree is
            # only called if related_objects is not in __dict__.
            related_objects = related_objects_graph[model._meta]
            model._meta.__dict__['_relation_tree'] = related_objects
        # It seems it is possible that self is not in all_models, so guard
        # against that with default for get().
        return self.__dict__.get('_relation_tree', EMPTY_RELATION_TREE)
    @cached_property
    def _relation_tree(self):
        # Lazily computed list of fields on other models pointing to this
        # one; see _populate_directed_relation_graph() for details.
        return self._populate_directed_relation_graph()
def _expire_cache(self, forward=True, reverse=True):
# This method is usually called by apps.cache_clear(), when the
# registry is finalized, or when a new field is added.
properties_to_expire = []
if forward:
properties_to_expire.extend(self.FORWARD_PROPERTIES)
if reverse and not self.abstract:
properties_to_expire.extend(self.REVERSE_PROPERTIES)
for cache_key in properties_to_expire:
try:
delattr(self, cache_key)
except AttributeError:
pass
self._get_fields_cache = {}
def get_fields(self, include_parents=True, include_hidden=False):
"""
Returns a list of fields associated to the model. By default will only
return forward fields. This can be changed by enabling or disabling
field types using the parameters:
- include_parents: include fields derived from inheritance
- include_hidden: include fields that have a related_name that
starts with a "+"
"""
if include_parents is False:
include_parents = PROXY_PARENTS
return self._get_fields(include_parents=include_parents, include_hidden=include_hidden)
    def _get_fields(self, forward=True, reverse=True, include_parents=True, include_hidden=False,
                    seen_models=None):
        """
        Internal helper function to return fields of the model.
        * If forward=True, then fields defined on this model are returned.
        * If reverse=True, then relations pointing to this model are returned.
        * If include_hidden=True, then fields with is_hidden=True are returned.
        * The include_parents argument toggles if fields from parent models
          should be included. It has three values: True, False, and
          PROXY_PARENTS. When set to PROXY_PARENTS, the call will return all
          fields defined for the current model or any of its parents in the
          parent chain to the model's concrete model.
        * seen_models is used internally to break recursion cycles; external
          callers should never supply it.
        """
        if include_parents not in (True, False, PROXY_PARENTS):
            raise TypeError("Invalid argument for include_parents: %s" % (include_parents,))
        # This helper function is used to allow recursion in ``get_fields()``
        # implementation and to provide a fast way for Django's internals to
        # access specific subsets of fields.
        # We must keep track of which models we have already seen. Otherwise we
        # could include the same field multiple times from different models.
        topmost_call = False
        if seen_models is None:
            seen_models = set()
            topmost_call = True
        seen_models.add(self.model)
        # Creates a cache key composed of all arguments; topmost_call matters
        # because virtual fields are only appended at the topmost level.
        cache_key = (forward, reverse, include_parents, include_hidden, topmost_call)
        try:
            # In order to avoid list manipulation. Always return a shallow copy
            # of the results.
            return self._get_fields_cache[cache_key]
        except KeyError:
            pass
        fields = []
        # Recursively call _get_fields() on each parent, with the same
        # options provided in this call.
        if include_parents is not False:
            for parent in self.parents:
                # In diamond inheritance it is possible that we see the same
                # model from two different routes. In that case, avoid adding
                # fields from the same parent again.
                if parent in seen_models:
                    continue
                if (parent._meta.concrete_model != self.concrete_model and
                        include_parents == PROXY_PARENTS):
                    continue
                for obj in parent._meta._get_fields(
                        forward=forward, reverse=reverse, include_parents=include_parents,
                        include_hidden=include_hidden, seen_models=seen_models):
                    # Parent links are skipped: they duplicate the implicit
                    # OneToOne already represented on the child.
                    if hasattr(obj, 'parent_link') and obj.parent_link:
                        continue
                    fields.append(obj)
        if reverse:
            # Tree is computed once and cached until the app cache is expired.
            # It is composed of a list of fields pointing to the current model
            # from other models.
            all_fields = self._relation_tree
            for field in all_fields:
                # If hidden fields should be included or the relation is not
                # intentionally hidden, add to the fields dict.
                if include_hidden or not field.rel.hidden:
                    fields.append(field.rel)
        if forward:
            fields.extend(
                field for field in chain(self.local_fields, self.local_many_to_many)
            )
            # Virtual fields are recopied to each child model, and they get a
            # different model as field.model in each child. Hence we have to
            # add the virtual fields separately from the topmost call. If we
            # did this recursively similar to local_fields, we would get field
            # instances with field.model != self.model.
            if topmost_call:
                fields.extend(
                    f for f in self.virtual_fields
                )
        # In order to avoid list manipulation. Always
        # return a shallow copy of the results
        fields = make_immutable_fields_list("get_fields()", fields)
        # Store result into cache for later access
        self._get_fields_cache[cache_key] = fields
        return fields
| mit |
mm1ke/portage | pym/_emerge/actions.py | 1 | 108119 | # Copyright 1999-2016 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
from __future__ import division, print_function, unicode_literals
import errno
import logging
import operator
import platform
import pwd
import random
import re
import signal
import socket
import stat
import subprocess
import sys
import tempfile
import textwrap
import time
import warnings
from itertools import chain
import portage
portage.proxy.lazyimport.lazyimport(globals(),
'portage.dbapi._similar_name_search:similar_name_search',
'portage.debug',
'portage.news:count_unread_news,display_news_notifications',
'portage.util._get_vm_info:get_vm_info',
'portage.util.locale:check_locale',
'portage.emaint.modules.sync.sync:SyncRepos',
'_emerge.chk_updated_cfg_files:chk_updated_cfg_files',
'_emerge.help:help@emerge_help',
'_emerge.post_emerge:display_news_notification,post_emerge',
'_emerge.stdout_spinner:stdout_spinner',
)
from portage import os
from portage import shutil
from portage import eapi_is_supported, _encodings, _unicode_decode
from portage.cache.cache_errors import CacheError
from portage.const import GLOBAL_CONFIG_PATH, VCS_DIRS, _DEPCLEAN_LIB_CHECK_DEFAULT
from portage.const import SUPPORTED_BINPKG_FORMATS, TIMESTAMP_FORMAT
from portage.dbapi.dep_expand import dep_expand
from portage.dbapi._expand_new_virt import expand_new_virt
from portage.dbapi.IndexedPortdb import IndexedPortdb
from portage.dbapi.IndexedVardb import IndexedVardb
from portage.dep import Atom, _repo_separator, _slot_separator
from portage.eclass_cache import hashed_path
from portage.exception import InvalidAtom, InvalidData, ParseError
from portage.output import blue, colorize, create_color_func, darkgreen, \
red, xtermTitle, xtermTitleReset, yellow
good = create_color_func("GOOD")
bad = create_color_func("BAD")
warn = create_color_func("WARN")
from portage.package.ebuild._ipc.QueryCommand import QueryCommand
from portage.package.ebuild.doebuild import _check_temp_dir
from portage._sets import load_default_config, SETPREFIX
from portage._sets.base import InternalPackageSet
from portage.util import cmp_sort_key, writemsg, varexpand, \
writemsg_level, writemsg_stdout
from portage.util.digraph import digraph
from portage.util.SlotObject import SlotObject
from portage.util._async.run_main_scheduler import run_main_scheduler
from portage.util._async.SchedulerInterface import SchedulerInterface
from portage.util._eventloop.global_event_loop import global_event_loop
from portage._global_updates import _global_updates
from portage.sync.old_tree_timestamp import old_tree_timestamp_warn
from portage.localization import _
from portage.metadata import action_metadata
from portage.emaint.main import print_results
from _emerge.clear_caches import clear_caches
from _emerge.countdown import countdown
from _emerge.create_depgraph_params import create_depgraph_params
from _emerge.Dependency import Dependency
from _emerge.depgraph import backtrack_depgraph, depgraph, resume_depgraph
from _emerge.DepPrioritySatisfiedRange import DepPrioritySatisfiedRange
from _emerge.emergelog import emergelog
from _emerge.is_valid_package_atom import is_valid_package_atom
from _emerge.MetadataRegen import MetadataRegen
from _emerge.Package import Package
from _emerge.ProgressHandler import ProgressHandler
from _emerge.RootConfig import RootConfig
from _emerge.Scheduler import Scheduler
from _emerge.search import search
from _emerge.SetArg import SetArg
from _emerge.show_invalid_depstring_notice import show_invalid_depstring_notice
from _emerge.unmerge import unmerge
from _emerge.UnmergeDepPriority import UnmergeDepPriority
from _emerge.UseFlagDisplay import pkg_use_display
from _emerge.UserQuery import UserQuery
if sys.hexversion >= 0x3000000:
	# Python 3: int subsumes long and str is always unicode; define the
	# py2-flavored aliases used throughout this module.
	long = int
	_unicode = str
else:
	# Python 2: keep the builtin unicode type under a private alias.
	_unicode = unicode
def action_build(emerge_config, trees=DeprecationWarning,
	mtimedb=DeprecationWarning, myopts=DeprecationWarning,
	myaction=DeprecationWarning, myfiles=DeprecationWarning, spinner=None):
	"""
	Implementation of the emerge build/install action: validate any saved
	resume state, build (or resume) a dependency graph, display the merge
	list and prompt when requested, then hand the graph to the Scheduler
	and optionally auto-clean afterwards.

	@param emerge_config: an _emerge_config instance (the legacy positional
		arguments are still accepted but deprecated)
	@param spinner: stdout_spinner instance used for progress display
	@rtype: int
	@return: os.EX_OK on success, 1 on failure, or 128 + SIGINT when the
		user aborts at a prompt
	"""
	if not isinstance(emerge_config, _emerge_config):
		warnings.warn("_emerge.actions.action_build() now expects "
			"an _emerge_config instance as the first parameter",
			DeprecationWarning, stacklevel=2)
		emerge_config = load_emerge_config(
			action=myaction, args=myfiles, trees=trees, opts=myopts)
	adjust_configs(emerge_config.opts, emerge_config.trees)
	settings, trees, mtimedb = emerge_config
	myopts = emerge_config.opts
	myaction = emerge_config.action
	myfiles = emerge_config.args
	if '--usepkgonly' not in myopts:
		old_tree_timestamp_warn(settings['PORTDIR'], settings)
	# It's best for config updates in /etc/portage to be processed
	# before we get here, so warn if they're not (bug #267103).
	chk_updated_cfg_files(settings['EROOT'], ['/etc/portage'])
	# validate the state of the resume data
	# so that we can make assumptions later.
	for k in ("resume", "resume_backup"):
		if k not in mtimedb:
			continue
		resume_data = mtimedb[k]
		if not isinstance(resume_data, dict):
			del mtimedb[k]
			continue
		mergelist = resume_data.get("mergelist")
		if not isinstance(mergelist, list):
			del mtimedb[k]
			continue
		for x in mergelist:
			if not (isinstance(x, list) and len(x) == 4):
				continue
			pkg_type, pkg_root, pkg_key, pkg_action = x
			if pkg_root not in trees:
				# Current $ROOT setting differs,
				# so the list must be stale.
				mergelist = None
				break
		if not mergelist:
			del mtimedb[k]
			continue
		resume_opts = resume_data.get("myopts")
		if not isinstance(resume_opts, (dict, list)):
			del mtimedb[k]
			continue
		favorites = resume_data.get("favorites")
		if not isinstance(favorites, list):
			del mtimedb[k]
			continue
	resume = False
	if "--resume" in myopts and \
		("resume" in mtimedb or
		"resume_backup" in mtimedb):
		resume = True
		if "resume" not in mtimedb:
			mtimedb["resume"] = mtimedb["resume_backup"]
			del mtimedb["resume_backup"]
			mtimedb.commit()
		# "myopts" is a list for backward compatibility.
		resume_opts = mtimedb["resume"].get("myopts", [])
		if isinstance(resume_opts, list):
			resume_opts = dict((k,True) for k in resume_opts)
		for opt in ("--ask", "--color", "--skipfirst", "--tree"):
			resume_opts.pop(opt, None)
		# Current options always override resume_opts.
		resume_opts.update(myopts)
		myopts.clear()
		myopts.update(resume_opts)
		if "--debug" in myopts:
			writemsg_level("myopts %s\n" % (myopts,))
		# Adjust config according to options of the command being resumed.
		for myroot in trees:
			mysettings = trees[myroot]["vartree"].settings
			mysettings.unlock()
			adjust_config(myopts, mysettings)
			mysettings.lock()
			del myroot, mysettings
	ldpath_mtimes = mtimedb["ldpath"]
	favorites=[]
	buildpkgonly = "--buildpkgonly" in myopts
	pretend = "--pretend" in myopts
	fetchonly = "--fetchonly" in myopts or "--fetch-all-uri" in myopts
	ask = "--ask" in myopts
	enter_invalid = '--ask-enter-invalid' in myopts
	nodeps = "--nodeps" in myopts
	oneshot = "--oneshot" in myopts or "--onlydeps" in myopts
	tree = "--tree" in myopts
	if nodeps and tree:
		tree = False
		del myopts["--tree"]
		portage.writemsg(colorize("WARN", " * ") + \
			"--tree is broken with --nodeps. Disabling...\n")
	debug = "--debug" in myopts
	verbose = "--verbose" in myopts
	quiet = "--quiet" in myopts
	myparams = create_depgraph_params(myopts, myaction)
	mergelist_shown = False
	if pretend or fetchonly:
		# make the mtimedb readonly
		mtimedb.filename = None
	if '--digest' in myopts or 'digest' in settings.features:
		if '--digest' in myopts:
			msg = "The --digest option"
		else:
			msg = "The FEATURES=digest setting"
		msg += " can prevent corruption from being" + \
			" noticed. The `repoman manifest` command is the preferred" + \
			" way to generate manifests and it is capable of doing an" + \
			" entire repository or category at once."
		prefix = bad(" * ")
		writemsg(prefix + "\n")
		for line in textwrap.wrap(msg, 72):
			writemsg("%s%s\n" % (prefix, line))
		writemsg(prefix + "\n")
	if resume:
		favorites = mtimedb["resume"].get("favorites")
		if not isinstance(favorites, list):
			favorites = []
		resume_data = mtimedb["resume"]
		mergelist = resume_data["mergelist"]
		if mergelist and "--skipfirst" in myopts:
			for i, task in enumerate(mergelist):
				if isinstance(task, list) and \
					task and task[-1] == "merge":
					del mergelist[i]
					break
		success = False
		mydepgraph = None
		try:
			success, mydepgraph, dropped_tasks = resume_depgraph(
				settings, trees, mtimedb, myopts, myparams, spinner)
		except (portage.exception.PackageNotFound,
			depgraph.UnsatisfiedResumeDep) as e:
			if isinstance(e, depgraph.UnsatisfiedResumeDep):
				mydepgraph = e.depgraph
			from portage.output import EOutput
			out = EOutput()
			resume_data = mtimedb["resume"]
			mergelist = resume_data.get("mergelist")
			if not isinstance(mergelist, list):
				mergelist = []
			# NOTE(review): precedence makes this read as "(mergelist and
			# debug) or (verbose and not quiet)" -- confirm the mergelist
			# guard is not meant to cover the verbose case as well.
			if mergelist and debug or (verbose and not quiet):
				out.eerror("Invalid resume list:")
				out.eerror("")
				indent = " "
				for task in mergelist:
					if isinstance(task, list):
						out.eerror(indent + str(tuple(task)))
				out.eerror("")
			if isinstance(e, depgraph.UnsatisfiedResumeDep):
				out.eerror("One or more packages are either masked or " + \
					"have missing dependencies:")
				out.eerror("")
				indent = " "
				for dep in e.value:
					if dep.atom is None:
						out.eerror(indent + "Masked package:")
						out.eerror(2 * indent + str(dep.parent))
						out.eerror("")
					else:
						out.eerror(indent + str(dep.atom) + " pulled in by:")
						out.eerror(2 * indent + str(dep.parent))
						out.eerror("")
				msg = "The resume list contains packages " + \
					"that are either masked or have " + \
					"unsatisfied dependencies. " + \
					"Please restart/continue " + \
					"the operation manually, or use --skipfirst " + \
					"to skip the first package in the list and " + \
					"any other packages that may be " + \
					"masked or have missing dependencies."
				for line in textwrap.wrap(msg, 72):
					out.eerror(line)
			elif isinstance(e, portage.exception.PackageNotFound):
				out.eerror("An expected package is " + \
					"not available: %s" % str(e))
				out.eerror("")
				msg = "The resume list contains one or more " + \
					"packages that are no longer " + \
					"available. Please restart/continue " + \
					"the operation manually."
				for line in textwrap.wrap(msg, 72):
					out.eerror(line)
		if success:
			if dropped_tasks:
				portage.writemsg("!!! One or more packages have been " + \
					"dropped due to\n" + \
					"!!! masking or unsatisfied dependencies:\n\n",
					noiselevel=-1)
				for task, atoms in dropped_tasks.items():
					if not atoms:
						writemsg(" %s is masked or unavailable\n" %
							(task,), noiselevel=-1)
					else:
						writemsg(" %s requires %s\n" %
							(task, ", ".join(atoms)), noiselevel=-1)
				portage.writemsg("\n", noiselevel=-1)
			del dropped_tasks
		else:
			if mydepgraph is not None:
				mydepgraph.display_problems()
			if not (ask or pretend):
				# delete the current list and also the backup
				# since it's probably stale too.
				for k in ("resume", "resume_backup"):
					mtimedb.pop(k, None)
				mtimedb.commit()
				return 1
	else:
		if ("--resume" in myopts):
			print(darkgreen("emerge: It seems we have nothing to resume..."))
			return os.EX_OK
		try:
			success, mydepgraph, favorites = backtrack_depgraph(
				settings, trees, myopts, myparams, myaction, myfiles, spinner)
		except portage.exception.PackageSetNotFound as e:
			root_config = trees[settings['EROOT']]['root_config']
			display_missing_pkg_set(root_config, e.value)
			return 1
		if success and mydepgraph.need_config_reload():
			load_emerge_config(emerge_config=emerge_config)
			adjust_configs(emerge_config.opts, emerge_config.trees)
			settings, trees, mtimedb = emerge_config
			# After config reload, the freshly instantiated binarytree
			# instances need to load remote metadata if --getbinpkg
			# is enabled. Use getbinpkg_refresh=False to use cached
			# metadata, since the cache is already fresh.
			if "--getbinpkg" in emerge_config.opts:
				for root_trees in emerge_config.trees.values():
					try:
						root_trees["bintree"].populate(
							getbinpkgs=True,
							getbinpkg_refresh=False)
					except ParseError as e:
						writemsg("\n\n!!!%s.\nSee make.conf(5) for more info.\n"
							% e, noiselevel=-1)
						return 1
		if "--autounmask-only" in myopts:
			mydepgraph.display_problems()
			return 0
		if not success:
			mydepgraph.display_problems()
			return 1
	mergecount = None
	if "--pretend" not in myopts and \
		("--ask" in myopts or "--tree" in myopts or \
		"--verbose" in myopts) and \
		not ("--quiet" in myopts and "--ask" not in myopts):
		if "--resume" in myopts:
			mymergelist = mydepgraph.altlist()
			if len(mymergelist) == 0:
				print(colorize("INFORM", "emerge: It seems we have nothing to resume..."))
				return os.EX_OK
			favorites = mtimedb["resume"]["favorites"]
			retval = mydepgraph.display(
				mydepgraph.altlist(),
				favorites=favorites)
			mydepgraph.display_problems()
			mergelist_shown = True
			if retval != os.EX_OK:
				return retval
			prompt="Would you like to resume merging these packages?"
		else:
			retval = mydepgraph.display(
				mydepgraph.altlist(),
				favorites=favorites)
			mydepgraph.display_problems()
			mergelist_shown = True
			if retval != os.EX_OK:
				return retval
			mergecount=0
			for x in mydepgraph.altlist():
				if isinstance(x, Package) and x.operation == "merge":
					mergecount += 1
			prompt = None
			if mergecount==0:
				sets = trees[settings['EROOT']]['root_config'].sets
				world_candidates = None
				if "selective" in myparams and \
					not oneshot and favorites:
					# Sets that are not world candidates are filtered
					# out here since the favorites list needs to be
					# complete for depgraph.loadResumeCommand() to
					# operate correctly.
					world_candidates = [x for x in favorites \
						if not (x.startswith(SETPREFIX) and \
						not sets[x[1:]].world_candidate)]
				if "selective" in myparams and \
					not oneshot and world_candidates:
					# Prompt later, inside saveNomergeFavorites.
					prompt = None
				else:
					print()
					print("Nothing to merge; quitting.")
					print()
					return os.EX_OK
			elif "--fetchonly" in myopts or "--fetch-all-uri" in myopts:
				prompt="Would you like to fetch the source files for these packages?"
			else:
				prompt="Would you like to merge these packages?"
		print()
		uq = UserQuery(myopts)
		if prompt is not None and "--ask" in myopts and \
			uq.query(prompt, enter_invalid) == "No":
			print()
			print("Quitting.")
			print()
			return 128 + signal.SIGINT
		# Don't ask again (e.g. when auto-cleaning packages after merge)
		if mergecount != 0:
			myopts.pop("--ask", None)
	if ("--pretend" in myopts) and not ("--fetchonly" in myopts or "--fetch-all-uri" in myopts):
		if ("--resume" in myopts):
			mymergelist = mydepgraph.altlist()
			if len(mymergelist) == 0:
				print(colorize("INFORM", "emerge: It seems we have nothing to resume..."))
				return os.EX_OK
			favorites = mtimedb["resume"]["favorites"]
			retval = mydepgraph.display(
				mydepgraph.altlist(),
				favorites=favorites)
			mydepgraph.display_problems()
			mergelist_shown = True
			if retval != os.EX_OK:
				return retval
		else:
			retval = mydepgraph.display(
				mydepgraph.altlist(),
				favorites=favorites)
			mydepgraph.display_problems()
			mergelist_shown = True
			if retval != os.EX_OK:
				return retval
	else:
		if not mergelist_shown:
			# If we haven't already shown the merge list above, at
			# least show warnings about missed updates and such.
			mydepgraph.display_problems()
		# Before merging anything, verify that the vardb/bindb locations
		# we would need to modify are actually writable.
		need_write_vardb = not Scheduler. \
			_opts_no_self_update.intersection(myopts)
		need_write_bindb = not any(x in myopts for x in
			("--fetchonly", "--fetch-all-uri",
			"--pretend", "--usepkgonly")) and \
			(any("buildpkg" in trees[eroot]["root_config"].
			settings.features for eroot in trees) or
			any("buildsyspkg" in trees[eroot]["root_config"].
			settings.features for eroot in trees))
		if need_write_bindb or need_write_vardb:
			eroots = set()
			ebuild_eroots = set()
			for x in mydepgraph.altlist():
				if isinstance(x, Package) and x.operation == "merge":
					eroots.add(x.root)
					if x.type_name == "ebuild":
						ebuild_eroots.add(x.root)
			for eroot in eroots:
				if need_write_vardb and \
					not trees[eroot]["vartree"].dbapi.writable:
					writemsg_level("!!! %s\n" %
						_("Read-only file system: %s") %
						trees[eroot]["vartree"].dbapi._dbroot,
						level=logging.ERROR, noiselevel=-1)
					return 1
				if need_write_bindb and eroot in ebuild_eroots and \
					("buildpkg" in trees[eroot]["root_config"].
					settings.features or
					"buildsyspkg" in trees[eroot]["root_config"].
					settings.features) and \
					not trees[eroot]["bintree"].dbapi.writable:
					writemsg_level("!!! %s\n" %
						_("Read-only file system: %s") %
						trees[eroot]["bintree"].pkgdir,
						level=logging.ERROR, noiselevel=-1)
					return 1
		if ("--resume" in myopts):
			favorites=mtimedb["resume"]["favorites"]
		else:
			if "resume" in mtimedb and \
				"mergelist" in mtimedb["resume"] and \
				len(mtimedb["resume"]["mergelist"]) > 1:
				mtimedb["resume_backup"] = mtimedb["resume"]
				del mtimedb["resume"]
				mtimedb.commit()
			mydepgraph.saveNomergeFavorites()
		if mergecount == 0:
			retval = os.EX_OK
		else:
			# Hand the computed graph to the Scheduler and run the merge.
			mergetask = Scheduler(settings, trees, mtimedb, myopts,
				spinner, favorites=favorites,
				graph_config=mydepgraph.schedulerGraph())
			del mydepgraph
			clear_caches(trees)
			retval = mergetask.merge()
			if retval == os.EX_OK and \
				not (buildpkgonly or fetchonly or pretend):
				if "yes" == settings.get("AUTOCLEAN"):
					portage.writemsg_stdout(">>> Auto-cleaning packages...\n")
					unmerge(trees[settings['EROOT']]['root_config'],
						myopts, "clean", [],
						ldpath_mtimes, autoclean=1)
				else:
					portage.writemsg_stdout(colorize("WARN", "WARNING:")
						+ " AUTOCLEAN is disabled. This can cause serious"
						+ " problems due to overlapping packages.\n")
	return retval
def action_config(settings, trees, myopts, myfiles):
	"""
	Handle `emerge --config <atom>`: run the pkg_config phase of a single
	installed package, prompting the user to disambiguate when the atom
	matches more than one installed version.

	@param settings: global portage config
	@param trees: trees dict keyed by EROOT
	@param myopts: parsed emerge options
	@param myfiles: command-line arguments; exactly one package atom expected
	@raises SystemExit: on invalid input, no matches, or user cancellation
	"""
	enter_invalid = '--ask-enter-invalid' in myopts
	uq = UserQuery(myopts)
	if len(myfiles) != 1:
		print(red("!!! config can only take a single package atom at this time\n"))
		sys.exit(1)
	if not is_valid_package_atom(myfiles[0], allow_repo=True):
		portage.writemsg("!!! '%s' is not a valid package atom.\n" % myfiles[0],
			noiselevel=-1)
		portage.writemsg("!!! Please check ebuild(5) for full details.\n")
		portage.writemsg("!!! (Did you specify a version but forget to prefix with '='?)\n")
		sys.exit(1)
	print()
	try:
		pkgs = trees[settings['EROOT']]['vartree'].dbapi.match(myfiles[0])
	except portage.exception.AmbiguousPackageName as e:
		# Multiple matches thrown from cpv_expand
		pkgs = e.args[0]
	if len(pkgs) == 0:
		print("No packages found.\n")
		sys.exit(0)
	elif len(pkgs) > 1:
		if "--ask" in myopts:
			# Interactive selection: number the matches and let the user pick.
			options = []
			print("Please select a package to configure:")
			idx = 0
			for pkg in pkgs:
				idx += 1
				options.append(str(idx))
				print(options[-1]+") "+pkg)
			print("X) Cancel")
			options.append("X")
			idx = uq.query("Selection?", enter_invalid, responses=options)
			if idx == "X":
				sys.exit(128 + signal.SIGINT)
			pkg = pkgs[int(idx)-1]
		else:
			print("The following packages available:")
			for pkg in pkgs:
				print("* "+pkg)
			print("\nPlease use a specific atom or the --ask option.")
			sys.exit(1)
	else:
		pkg = pkgs[0]

	print()
	if "--ask" in myopts:
		if uq.query("Ready to configure %s?" % pkg, enter_invalid) == "No":
			sys.exit(128 + signal.SIGINT)
	else:
		print("Configuring pkg...")
	print()
	ebuildpath = trees[settings['EROOT']]['vartree'].dbapi.findname(pkg)
	mysettings = portage.config(clone=settings)
	vardb = trees[mysettings['EROOT']]['vartree'].dbapi
	debug = mysettings.get("PORTAGE_DEBUG") == "1"
	# BUG FIX: the original passed debug=(settings.get("PORTAGE_DEBUG", "") == 1),
	# comparing a *string* to the int 1 — always False — while the correctly
	# computed local `debug` was only used for the "clean" phase below.
	retval = portage.doebuild(ebuildpath, "config", settings=mysettings,
		debug=debug, cleanup=True,
		mydbapi=trees[settings['EROOT']]['vartree'].dbapi, tree="vartree")
	if retval == os.EX_OK:
		portage.doebuild(ebuildpath, "clean", settings=mysettings,
			debug=debug, mydbapi=vardb, tree="vartree")
	print()
def action_depclean(settings, trees, ldpath_mtimes,
	myopts, action, myfiles, spinner, scheduler=None):
	"""
	Handle `emerge --depclean` / `emerge --prune`: compute the set of
	removable packages via calc_depclean(), unmerge them, and print a
	summary of installed/world/system/profile package counts.

	Kill packages that aren't explicitly merged or are required as a
	dependency of another package. World file is explicit.

	Global depclean or prune operations are not very safe when there are
	missing dependencies since it's unknown how badly incomplete
	the dependency graph is, and we might accidentally remove packages
	that should have been pulled into the graph. On the other hand, it's
	relatively safe to ignore missing deps when only asked to remove
	specific packages.

	@return: 0 on success, or the non-zero status from calc_depclean()
		or unmerge()
	"""
	msg = []
	# Warn about possible link-level breakage when the lib-check safety net
	# will not run.  (The original condition was the double negative
	# `not myopts.get(...) != "n"`, which is equivalent to `... == "n"`.)
	if "preserve-libs" not in settings.features and \
		myopts.get("--depclean-lib-check", _DEPCLEAN_LIB_CHECK_DEFAULT) == "n":
		msg.append("Depclean may break link level dependencies. Thus, it is\n")
		msg.append("recommended to use a tool such as " + good("`revdep-rebuild`") + " (from\n")
		msg.append("app-portage/gentoolkit) in order to detect such breakage.\n")
		msg.append("\n")
	msg.append("Always study the list of packages to be cleaned for any obvious\n")
	msg.append("mistakes. Packages that are part of the world set will always\n")
	msg.append("be kept. They can be manually added to this set with\n")
	msg.append(good("`emerge --noreplace <atom>`") + ". Packages that are listed in\n")
	msg.append("package.provided (see portage(5)) will be removed by\n")
	msg.append("depclean, even if they are part of the world set.\n")
	msg.append("\n")
	msg.append("As a safety measure, depclean will not remove any packages\n")
	msg.append("unless *all* required dependencies have been resolved. As a\n")
	msg.append("consequence of this, it often becomes necessary to run \n")
	msg.append("%s" % good("`emerge --update --newuse --deep @world`")
		+ " prior to depclean.\n")

	# Only show the warning banner for a full, non-quiet depclean.
	if action == "depclean" and "--quiet" not in myopts and not myfiles:
		portage.writemsg_stdout("\n")
		for x in msg:
			portage.writemsg_stdout(colorize("WARN", " * ") + x)

	root_config = trees[settings['EROOT']]['root_config']
	vardb = root_config.trees['vartree'].dbapi

	args_set = InternalPackageSet(allow_repo=True)
	if myfiles:
		args_set.update(myfiles)
		matched_packages = False
		for x in args_set:
			if vardb.match(x):
				matched_packages = True
			else:
				# Strip the "null/" prefix left by failed category expansion.
				writemsg_level("--- Couldn't find '%s' to %s.\n" % \
					(x.replace("null/", ""), action),
					level=logging.WARN, noiselevel=-1)
		if not matched_packages:
			writemsg_level(">>> No packages selected for removal by %s\n" % \
				action)
			return 0

	# The calculation is done in a separate function so that depgraph
	# references go out of scope and the corresponding memory
	# is freed before we call unmerge().
	rval, cleanlist, ordered, req_pkg_count = \
		calc_depclean(settings, trees, ldpath_mtimes,
			myopts, action, args_set, spinner)

	clear_caches(trees)

	if rval != os.EX_OK:
		return rval

	if cleanlist:
		rval = unmerge(root_config, myopts, "unmerge",
			cleanlist, ldpath_mtimes, ordered=ordered,
			scheduler=scheduler)

	if action == "prune":
		return rval

	if not cleanlist and "--quiet" in myopts:
		return rval

	set_atoms = {}
	for k in ("profile", "system", "selected"):
		try:
			set_atoms[k] = root_config.setconfig.getSetAtoms(k)
		except portage.exception.PackageSetNotFound:
			# A nested set could not be resolved, so ignore nested sets.
			set_atoms[k] = root_config.sets[k].getAtoms()

	print("Packages installed:   " + str(len(vardb.cpv_all())))
	print("Packages in world:    %d" % len(set_atoms["selected"]))
	print("Packages in system:   %d" % len(set_atoms["system"]))
	if set_atoms["profile"]:
		print("Packages in profile:  %d" % len(set_atoms["profile"]))
	print("Required packages:    "+str(req_pkg_count))
	if "--pretend" in myopts:
		print("Number to remove:     "+str(len(cleanlist)))
	else:
		print("Number removed:       "+str(len(cleanlist)))

	return rval
def calc_depclean(settings, trees, ldpath_mtimes,
	myopts, action, args_set, spinner):
	"""
	Compute the list of packages that may safely be unmerged for a
	depclean or prune operation.

	Builds a "remove"-mode depgraph rooted at the world/system/selected
	sets (plus a protected set of packages that must be kept), optionally
	performs a shared-library consumer check so that sole providers of
	still-needed libraries are kept, and finally topologically orders the
	removable packages so each is unmerged before its dependencies.

	@param args_set: specific atoms requested for removal; empty for a
		global depclean
	@return: tuple (rval, cleanlist, ordered, req_pkg_count) where rval is
		0 on success or 1 on failure, cleanlist is a list of cpv strings to
		unmerge, ordered indicates whether cleanlist is dependency-ordered,
		and req_pkg_count is the number of required (kept) packages.
	"""
	allow_missing_deps = bool(args_set)

	debug = '--debug' in myopts
	xterm_titles = "notitles" not in settings.features
	root_len = len(settings["ROOT"])
	eroot = settings['EROOT']
	root_config = trees[eroot]["root_config"]
	psets = root_config.setconfig.psets
	deselect = myopts.get('--deselect') != 'n'
	required_sets = {}
	required_sets['world'] = psets['world']

	# When removing packages, a temporary version of the world 'selected'
	# set may be used which excludes packages that are intended to be
	# eligible for removal.
	selected_set = psets['selected']
	required_sets['selected'] = selected_set
	protected_set = InternalPackageSet()
	protected_set_name = '____depclean_protected_set____'
	required_sets[protected_set_name] = protected_set

	set_error = False
	set_atoms = {}
	for k in ("profile", "system", "selected"):
		try:
			set_atoms[k] = root_config.setconfig.getSetAtoms(k)
		except portage.exception.PackageSetNotFound as e:
			# A nested set could not be resolved, so ignore nested sets.
			set_atoms[k] = root_config.sets[k].getAtoms()
			writemsg_level(_("!!! The set '%s' "
				"contains a non-existent set named '%s'.\n") %
				(k, e), level=logging.ERROR, noiselevel=-1)
			set_error = True

	# Support @profile as an alternative to @system.
	if not (set_atoms["system"] or set_atoms["profile"]):
		writemsg_level(_("!!! You have no system list.\n"),
			level=logging.WARNING, noiselevel=-1)

	if not set_atoms["selected"]:
		writemsg_level(_("!!! You have no world file.\n"),
			level=logging.WARNING, noiselevel=-1)

	# Suppress world file warnings unless @world is completely empty,
	# since having an empty world file can be a valid state.
	try:
		world_atoms = bool(root_config.setconfig.getSetAtoms('world'))
	except portage.exception.PackageSetNotFound as e:
		writemsg_level(_("!!! The set '%s' "
			"contains a non-existent set named '%s'.\n") %
			("world", e), level=logging.ERROR, noiselevel=-1)
		set_error = True
	else:
		if not world_atoms:
			writemsg_level(_("!!! Your @world set is empty.\n"),
				level=logging.ERROR, noiselevel=-1)
			set_error = True

	if set_error:
		writemsg_level(_("!!! Aborting due to set configuration "
			"errors displayed above.\n"),
			level=logging.ERROR, noiselevel=-1)
		return 1, [], False, 0

	if action == "depclean":
		emergelog(xterm_titles, " >>> depclean")

	# Two trailing spaces are erased by the "\b\b" in the paired
	# "... done!" message below.
	writemsg_level("\nCalculating dependencies  ")
	resolver_params = create_depgraph_params(myopts, "remove")
	resolver = depgraph(settings, trees, myopts, resolver_params, spinner)
	resolver._load_vdb()
	vardb = resolver._frozen_config.trees[eroot]["vartree"].dbapi
	real_vardb = trees[eroot]["vartree"].dbapi

	if action == "depclean":
		if args_set:
			if deselect:
				# Start with an empty set.
				selected_set = InternalPackageSet()
				required_sets['selected'] = selected_set
				# Pull in any sets nested within the selected set.
				selected_set.update(psets['selected'].getNonAtoms())

			# Pull in everything that's installed but not matched
			# by an argument atom since we don't want to clean any
			# package if something depends on it.
			for pkg in vardb:
				if spinner:
					spinner.update()

				try:
					if args_set.findAtomForPackage(pkg) is None:
						protected_set.add("=" + pkg.cpv)
						continue
				except portage.exception.InvalidDependString as e:
					show_invalid_depstring_notice(pkg,
						pkg._metadata["PROVIDE"], _unicode(e))
					del e
					# When in doubt, keep the package.
					protected_set.add("=" + pkg.cpv)
					continue

	elif action == "prune":
		if deselect:
			# Start with an empty set.
			selected_set = InternalPackageSet()
			required_sets['selected'] = selected_set
			# Pull in any sets nested within the selected set.
			selected_set.update(psets['selected'].getNonAtoms())

		# Pull in everything that's installed since we don't
		# to prune a package if something depends on it.
		protected_set.update(vardb.cp_all())

		if not args_set:
			# Try to prune everything that's slotted.
			for cp in vardb.cp_all():
				if len(vardb.cp_list(cp)) > 1:
					args_set.add(cp)

		# Remove atoms from world that match installed packages
		# that are also matched by argument atoms, but do not remove
		# them if they match the highest installed version.
		for pkg in vardb:
			if spinner is not None:
				spinner.update()
			pkgs_for_cp = vardb.match_pkgs(Atom(pkg.cp))
			if not pkgs_for_cp or pkg not in pkgs_for_cp:
				raise AssertionError("package expected in matches: " + \
					"cp = %s, cpv = %s matches = %s" % \
					(pkg.cp, pkg.cpv, [str(x) for x in pkgs_for_cp]))

			highest_version = pkgs_for_cp[-1]
			if pkg == highest_version:
				# pkg is the highest version
				protected_set.add("=" + pkg.cpv)
				continue

			if len(pkgs_for_cp) <= 1:
				raise AssertionError("more packages expected: " + \
					"cp = %s, cpv = %s matches = %s" % \
					(pkg.cp, pkg.cpv, [str(x) for x in pkgs_for_cp]))

			try:
				if args_set.findAtomForPackage(pkg) is None:
					protected_set.add("=" + pkg.cpv)
					continue
			except portage.exception.InvalidDependString as e:
				show_invalid_depstring_notice(pkg,
					pkg._metadata["PROVIDE"], _unicode(e))
				del e
				# When in doubt, keep the package.
				protected_set.add("=" + pkg.cpv)
				continue

	if resolver._frozen_config.excluded_pkgs:
		# Packages matched by --exclude must never be removed, so treat
		# them as a required set too.
		excluded_set = resolver._frozen_config.excluded_pkgs
		required_sets['__excluded__'] = InternalPackageSet()

		for pkg in vardb:
			if spinner:
				spinner.update()

			try:
				if excluded_set.findAtomForPackage(pkg):
					required_sets['__excluded__'].add("=" + pkg.cpv)
			except portage.exception.InvalidDependString as e:
				show_invalid_depstring_notice(pkg,
					pkg._metadata["PROVIDE"], _unicode(e))
				del e
				required_sets['__excluded__'].add("=" + pkg.cpv)

	success = resolver._complete_graph(required_sets={eroot:required_sets})
	writemsg_level("\b\b... done!\n")

	resolver.display_problems()

	if not success:
		return 1, [], False, 0

	def unresolved_deps():
		# Report deps of installed packages that the graph could not
		# satisfy.  Returns True when the calculation must be aborted.
		soname_deps = set()
		unresolvable = set()
		for dep in resolver._dynamic_config._initially_unsatisfied_deps:
			if isinstance(dep.parent, Package) and \
				(dep.priority > UnmergeDepPriority.SOFT):
				if dep.atom.soname:
					soname_deps.add((dep.atom, dep.parent.cpv))
				else:
					unresolvable.add((dep.atom, dep.parent.cpv))

		if soname_deps:
			# Generally, broken soname dependencies can safely be
			# suppressed by a REQUIRES_EXCLUDE setting in the ebuild,
			# so they should only trigger a warning message.
			prefix = warn(" * ")
			msg = []
			msg.append("Broken soname dependencies found:")
			msg.append("")
			for atom, parent in soname_deps:
				msg.append("  %s required by:" % (atom,))
				msg.append("    %s" % (parent,))
				msg.append("")

			writemsg_level("".join("%s%s\n" % (prefix, line) for line in msg),
				level=logging.WARNING, noiselevel=-1)

		if not unresolvable:
			return False

		if unresolvable and not allow_missing_deps:

			if "--debug" in myopts:
				writemsg("\ndigraph:\n\n", noiselevel=-1)
				resolver._dynamic_config.digraph.debug_print()
				writemsg("\n", noiselevel=-1)

			prefix = bad(" * ")
			msg = []
			msg.append("Dependencies could not be completely resolved due to")
			msg.append("the following required packages not being installed:")
			msg.append("")
			for atom, parent in unresolvable:
				# Show the unevaluated atom too when USE conditionals
				# changed what it evaluates to.
				if atom.package and atom != atom.unevaluated_atom and \
					vardb.match(_unicode(atom)):
					msg.append("  %s (%s) pulled in by:" %
						(atom.unevaluated_atom, atom))
				else:
					msg.append("  %s pulled in by:" % (atom,))
				msg.append("    %s" % (parent,))
				msg.append("")
			msg.extend(textwrap.wrap(
				"Have you forgotten to do a complete update prior " + \
				"to depclean? The most comprehensive command for this " + \
				"purpose is as follows:", 65
			))
			msg.append("")
			msg.append("  " + \
				good("emerge --update --newuse --deep --with-bdeps=y @world"))
			msg.append("")
			msg.extend(textwrap.wrap(
				"Note that the --with-bdeps=y option is not required in " + \
				"many situations. Refer to the emerge manual page " + \
				"(run `man emerge`) for more information about " + \
				"--with-bdeps.", 65
			))
			msg.append("")
			msg.extend(textwrap.wrap(
				"Also, note that it may be necessary to manually uninstall " + \
				"packages that no longer exist in the portage tree, since " + \
				"it may not be possible to satisfy their dependencies.", 65
			))
			if action == "prune":
				msg.append("")
				msg.append("If you would like to ignore " + \
					"dependencies then use %s." % good("--nodeps"))
			writemsg_level("".join("%s%s\n" % (prefix, line) for line in msg),
				level=logging.ERROR, noiselevel=-1)
			return True
		return False

	if unresolved_deps():
		return 1, [], False, 0

	graph = resolver._dynamic_config.digraph.copy()
	required_pkgs_total = 0
	for node in graph:
		if isinstance(node, Package):
			required_pkgs_total += 1

	def show_parents(child_node):
		# Print the reverse dependencies that keep child_node installed.
		parent_atoms = \
			resolver._dynamic_config._parent_atoms.get(child_node, [])

		# Never display the special internal protected_set.
		parent_atoms = [parent_atom for parent_atom in parent_atoms
			if not (isinstance(parent_atom[0], SetArg) and
			parent_atom[0].name == protected_set_name)]

		if not parent_atoms:
			# With --prune, the highest version can be pulled in without any
			# real parent since all installed packages are pulled in. In that
			# case there's nothing to show here.
			return
		parent_atom_dict = {}
		for parent, atom in parent_atoms:
			parent_atom_dict.setdefault(parent, []).append(atom)

		parent_strs = []
		for parent, atoms in parent_atom_dict.items():
			# Display package atoms and soname
			# atoms in separate groups.
			atoms = sorted(atoms, reverse=True,
				key=operator.attrgetter('package'))
			parent_strs.append("%s requires %s" %
				(getattr(parent, "cpv", parent),
				", ".join(_unicode(atom) for atom in atoms)))
		parent_strs.sort()
		msg = []
		msg.append("  %s pulled in by:\n" % (child_node.cpv,))
		for parent_str in parent_strs:
			msg.append("    %s\n" % (parent_str,))
		msg.append("\n")
		portage.writemsg_stdout("".join(msg), noiselevel=-1)

	def cmp_pkg_cpv(pkg1, pkg2):
		"""Sort Package instances by cpv."""
		if pkg1.cpv > pkg2.cpv:
			return 1
		elif pkg1.cpv == pkg2.cpv:
			return 0
		else:
			return -1

	def create_cleanlist():
		# Collect installed packages that are absent from the required
		# graph (and therefore removable).

		if "--debug" in myopts:
			writemsg("\ndigraph:\n\n", noiselevel=-1)
			graph.debug_print()
			writemsg("\n", noiselevel=-1)

		pkgs_to_remove = []

		if action == "depclean":
			if args_set:
				for pkg in sorted(vardb, key=cmp_sort_key(cmp_pkg_cpv)):
					arg_atom = None
					try:
						arg_atom = args_set.findAtomForPackage(pkg)
					except portage.exception.InvalidDependString:
						# this error has already been displayed by now
						continue

					if arg_atom:
						if pkg not in graph:
							pkgs_to_remove.append(pkg)
						elif "--verbose" in myopts:
							show_parents(pkg)
			else:
				for pkg in sorted(vardb, key=cmp_sort_key(cmp_pkg_cpv)):
					if pkg not in graph:
						pkgs_to_remove.append(pkg)
					elif "--verbose" in myopts:
						show_parents(pkg)

		elif action == "prune":
			for atom in args_set:
				for pkg in vardb.match_pkgs(atom):
					if pkg not in graph:
						pkgs_to_remove.append(pkg)
					elif "--verbose" in myopts:
						show_parents(pkg)

		if not pkgs_to_remove:
			writemsg_level(
				">>> No packages selected for removal by %s\n" % action)
			if "--verbose" not in myopts:
				writemsg_level(
					">>> To see reverse dependencies, use %s\n" % \
						good("--verbose"))
			if action == "prune":
				writemsg_level(
					">>> To ignore dependencies, use %s\n" % \
						good("--nodeps"))

		return pkgs_to_remove

	cleanlist = create_cleanlist()
	clean_set = set(cleanlist)

	depclean_lib_check = cleanlist and real_vardb._linkmap is not None and \
		myopts.get("--depclean-lib-check", _DEPCLEAN_LIB_CHECK_DEFAULT) != "n"
	preserve_libs = "preserve-libs" in settings.features
	preserve_libs_restrict = False

	if depclean_lib_check and preserve_libs:
		for pkg in cleanlist:
			if "preserve-libs" in pkg.restrict:
				preserve_libs_restrict = True
				break

	if depclean_lib_check and \
		(preserve_libs_restrict or not preserve_libs):

		# Check if any of these packages are the sole providers of libraries
		# with consumers that have not been selected for removal. If so, these
		# packages and any dependencies need to be added to the graph.
		linkmap = real_vardb._linkmap
		consumer_cache = {}
		provider_cache = {}
		consumer_map = {}

		writemsg_level(">>> Checking for lib consumers...\n")

		for pkg in cleanlist:

			if preserve_libs and "preserve-libs" not in pkg.restrict:
				# Any needed libraries will be preserved
				# when this package is unmerged, so there's
				# no need to account for it here.
				continue

			pkg_dblink = real_vardb._dblink(pkg.cpv)
			consumers = {}

			for lib in pkg_dblink.getcontents():
				lib = lib[root_len:]
				lib_key = linkmap._obj_key(lib)
				lib_consumers = consumer_cache.get(lib_key)
				if lib_consumers is None:
					try:
						lib_consumers = linkmap.findConsumers(lib_key)
					except KeyError:
						continue
					consumer_cache[lib_key] = lib_consumers
				if lib_consumers:
					consumers[lib_key] = lib_consumers

			if not consumers:
				continue

			for lib, lib_consumers in list(consumers.items()):
				# Consumers owned by the package being removed don't count.
				for consumer_file in list(lib_consumers):
					if pkg_dblink.isowner(consumer_file):
						lib_consumers.remove(consumer_file)
				if not lib_consumers:
					del consumers[lib]

			if not consumers:
				continue

			for lib, lib_consumers in consumers.items():

				soname = linkmap.getSoname(lib)

				consumer_providers = []
				for lib_consumer in lib_consumers:
					# BUG FIX: the cache was read with key `lib` but written
					# with key `lib_consumer`, so lookups could return
					# providers for the wrong object (or always miss).
					providers = provider_cache.get(lib_consumer)
					if providers is None:
						providers = linkmap.findProviders(lib_consumer)
						provider_cache[lib_consumer] = providers
					if soname not in providers:
						# Why does this happen?
						continue
					consumer_providers.append(
						(lib_consumer, providers[soname]))

				consumers[lib] = consumer_providers

			consumer_map[pkg] = consumers

		if consumer_map:

			search_files = set()
			for consumers in consumer_map.values():
				for lib, consumer_providers in consumers.items():
					for lib_consumer, providers in consumer_providers:
						search_files.add(lib_consumer)
						search_files.update(providers)

			writemsg_level(">>> Assigning files to packages...\n")
			file_owners = {}
			for f in search_files:
				owner_set = set()
				for owner in linkmap.getOwners(f):
					owner_dblink = real_vardb._dblink(owner)
					if owner_dblink.exists():
						owner_set.add(owner_dblink)
				if owner_set:
					file_owners[f] = owner_set

			for pkg, consumers in list(consumer_map.items()):
				for lib, consumer_providers in list(consumers.items()):
					lib_consumers = set()

					for lib_consumer, providers in consumer_providers:
						owner_set = file_owners.get(lib_consumer)
						provider_dblinks = set()
						provider_pkgs = set()

						if len(providers) > 1:
							for provider in providers:
								provider_set = file_owners.get(provider)
								if provider_set is not None:
									provider_dblinks.update(provider_set)

						if len(provider_dblinks) > 1:
							# The library has another provider that is not
							# scheduled for removal, so this consumer is safe.
							for provider_dblink in provider_dblinks:
								provider_pkg = resolver._pkg(
									provider_dblink.mycpv, "installed",
									root_config, installed=True)
								if provider_pkg not in clean_set:
									provider_pkgs.add(provider_pkg)

						if provider_pkgs:
							continue

						if owner_set is not None:
							lib_consumers.update(owner_set)

					for consumer_dblink in list(lib_consumers):
						# Consumers that will be removed anyway don't count.
						if resolver._pkg(consumer_dblink.mycpv, "installed",
							root_config, installed=True) in clean_set:
							lib_consumers.remove(consumer_dblink)
							continue

					if lib_consumers:
						consumers[lib] = lib_consumers
					else:
						del consumers[lib]
				if not consumers:
					del consumer_map[pkg]

		if consumer_map:
			# TODO: Implement a package set for rebuilding consumer packages.

			msg = "In order to avoid breakage of link level " + \
				"dependencies, one or more packages will not be removed. " + \
				"This can be solved by rebuilding " + \
				"the packages that pulled them in."

			prefix = bad(" * ")
			writemsg_level("".join(prefix + "%s\n" % line for \
				line in textwrap.wrap(msg, 70)), level=logging.WARNING, noiselevel=-1)

			msg = []
			for pkg in sorted(consumer_map, key=cmp_sort_key(cmp_pkg_cpv)):
				consumers = consumer_map[pkg]
				consumer_libs = {}
				for lib, lib_consumers in consumers.items():
					for consumer in lib_consumers:
						consumer_libs.setdefault(
							consumer.mycpv, set()).add(linkmap.getSoname(lib))
				unique_consumers = set(chain(*consumers.values()))
				unique_consumers = sorted(consumer.mycpv \
					for consumer in unique_consumers)
				msg.append("")
				msg.append("  %s pulled in by:" % (pkg.cpv,))
				for consumer in unique_consumers:
					libs = consumer_libs[consumer]
					msg.append("    %s needs %s" % \
						(consumer, ', '.join(sorted(libs))))
			msg.append("")
			writemsg_level("".join(prefix + "%s\n" % line for line in msg),
				level=logging.WARNING, noiselevel=-1)

			# Add lib providers to the graph as children of lib consumers,
			# and also add any dependencies pulled in by the provider.
			writemsg_level(">>> Adding lib providers to graph...\n")

			for pkg, consumers in consumer_map.items():
				for consumer_dblink in set(chain(*consumers.values())):
					consumer_pkg = resolver._pkg(consumer_dblink.mycpv,
						"installed", root_config, installed=True)
					if not resolver._add_pkg(pkg,
						Dependency(parent=consumer_pkg,
						priority=UnmergeDepPriority(runtime=True,
							runtime_slot_op=True),
						root=pkg.root)):
						resolver.display_problems()
						return 1, [], False, 0

			writemsg_level("\nCalculating dependencies  ")
			success = resolver._complete_graph(
				required_sets={eroot:required_sets})
			writemsg_level("\b\b... done!\n")
			resolver.display_problems()
			if not success:
				return 1, [], False, 0
			if unresolved_deps():
				return 1, [], False, 0

			# Recompute the removable set now that providers were kept.
			graph = resolver._dynamic_config.digraph.copy()
			required_pkgs_total = 0
			for node in graph:
				if isinstance(node, Package):
					required_pkgs_total += 1
			cleanlist = create_cleanlist()
			if not cleanlist:
				return 0, [], False, required_pkgs_total
			clean_set = set(cleanlist)

	if clean_set:
		writemsg_level(">>> Calculating removal order...\n")
		# Use a topological sort to create an unmerge order such that
		# each package is unmerged before it's dependencies. This is
		# necessary to avoid breaking things that may need to run
		# during pkg_prerm or pkg_postrm phases.

		# Create a new graph to account for dependencies between the
		# packages being unmerged.
		graph = digraph()
		del cleanlist[:]

		runtime = UnmergeDepPriority(runtime=True)
		runtime_post = UnmergeDepPriority(runtime_post=True)
		buildtime = UnmergeDepPriority(buildtime=True)
		priority_map = {
			"RDEPEND": runtime,
			"PDEPEND": runtime_post,
			"HDEPEND": buildtime,
			"DEPEND": buildtime,
		}

		for node in clean_set:
			graph.add(node, None)
			for dep_type in Package._dep_keys:
				depstr = node._metadata[dep_type]
				if not depstr:
					continue
				priority = priority_map[dep_type]

				if debug:
					writemsg_level("\nParent: %s\n"
						% (node,), noiselevel=-1, level=logging.DEBUG)
					writemsg_level("Depstring: %s\n"
						% (depstr,), noiselevel=-1, level=logging.DEBUG)
					writemsg_level("Priority: %s\n"
						% (priority,), noiselevel=-1, level=logging.DEBUG)

				try:
					atoms = resolver._select_atoms(eroot, depstr,
						myuse=node.use.enabled, parent=node,
						priority=priority)[node]
				except portage.exception.InvalidDependString:
					# Ignore invalid deps of packages that will
					# be uninstalled anyway.
					continue

				if debug:
					writemsg_level("Candidates: [%s]\n" % \
						', '.join("'%s'" % (x,) for x in atoms),
						noiselevel=-1, level=logging.DEBUG)

				for atom in atoms:
					if not isinstance(atom, portage.dep.Atom):
						# Ignore invalid atoms returned from dep_check().
						continue
					if atom.blocker:
						continue
					matches = vardb.match_pkgs(atom)
					if not matches:
						continue
					for child_node in matches:
						if child_node in clean_set:

							mypriority = priority.copy()
							if atom.slot_operator_built:
								if mypriority.buildtime:
									mypriority.buildtime_slot_op = True
								if mypriority.runtime:
									mypriority.runtime_slot_op = True

							graph.add(child_node, node, priority=mypriority)

		if debug:
			writemsg_level("\nunmerge digraph:\n\n",
				noiselevel=-1, level=logging.DEBUG)
			graph.debug_print()
			writemsg_level("\n", noiselevel=-1, level=logging.DEBUG)

		ordered = True
		if len(graph.order) == len(graph.root_nodes()):
			# If there are no dependencies between packages
			# let unmerge() group them by cat/pn.
			ordered = False
			cleanlist = [pkg.cpv for pkg in graph.order]
		else:
			# Order nodes from lowest to highest overall reference count for
			# optimal root node selection (this can help minimize issues
			# with unaccounted implicit dependencies).
			node_refcounts = {}
			for node in graph.order:
				node_refcounts[node] = len(graph.parent_nodes(node))
			def cmp_reference_count(node1, node2):
				return node_refcounts[node1] - node_refcounts[node2]
			graph.order.sort(key=cmp_sort_key(cmp_reference_count))

			ignore_priority_range = [None]
			ignore_priority_range.extend(
				range(UnmergeDepPriority.MIN, UnmergeDepPriority.MAX + 1))
			while graph:
				for ignore_priority in ignore_priority_range:
					nodes = graph.root_nodes(ignore_priority=ignore_priority)
					if nodes:
						break
				if not nodes:
					raise AssertionError("no root nodes")
				if ignore_priority is not None:
					# Some deps have been dropped due to circular dependencies,
					# so only pop one node in order to minimize the number that
					# are dropped.
					del nodes[1:]
				for node in nodes:
					graph.remove(node)
					cleanlist.append(node.cpv)

		return 0, cleanlist, ordered, required_pkgs_total
	return 0, [], False, required_pkgs_total
def action_deselect(settings, trees, opts, atoms):
	"""
	Handle `emerge --deselect`: remove the given atoms (and set arguments)
	from the world favorites, expanding categories and slots so installed
	packages are matched, optionally prompting with --ask.

	Returns 1 when the world set is immutable, 128 + SIGINT when the user
	declines the --ask prompt, and os.EX_OK otherwise.
	"""
	enter_invalid = '--ask-enter-invalid' in opts
	root_config = trees[settings['EROOT']]['root_config']
	world_set = root_config.sets['selected']
	if not hasattr(world_set, 'update'):
		writemsg_level("World @selected set does not appear to be mutable.\n",
			level=logging.ERROR, noiselevel=-1)
		return 1

	pretend = '--pretend' in opts
	locked = False
	# Only take the on-disk lock when we may actually write; the finally
	# block below guarantees it is released.
	if not pretend and hasattr(world_set, 'lock'):
		world_set.lock()
		locked = True
	try:
		world_set.load()
		world_atoms = world_set.getAtoms()
		vardb = root_config.trees["vartree"].dbapi
		# Grow the argument atoms into every form that might appear in the
		# world file (expanded categories, cp:slot of installed matches).
		expanded_atoms = set(atoms)

		for atom in atoms:
			if not atom.startswith(SETPREFIX):
				if atom.cp.startswith("null/"):
					# try to expand category from world set
					null_cat, pn = portage.catsplit(atom.cp)
					for world_atom in world_atoms:
						cat, world_pn = portage.catsplit(world_atom.cp)
						if pn == world_pn:
							expanded_atoms.add(
								Atom(atom.replace("null", cat, 1),
								allow_repo=True, allow_wildcard=True))

				# Also match world entries recorded as cp:slot for any
				# installed package that the argument atom matches.
				for cpv in vardb.match(atom):
					pkg = vardb._pkg_str(cpv, None)
					expanded_atoms.add(Atom("%s:%s" % (pkg.cp, pkg.slot)))

		discard_atoms = set()
		for atom in world_set:
			for arg_atom in expanded_atoms:
				if arg_atom.startswith(SETPREFIX):
					# Set arguments only discard identical set entries.
					if atom.startswith(SETPREFIX) and \
						arg_atom == atom:
						discard_atoms.add(atom)
						break
				else:
					# Package atoms must intersect, and a slot/repo given on
					# the command line must also be present in the world atom.
					if not atom.startswith(SETPREFIX) and \
						arg_atom.intersects(atom) and \
						not (arg_atom.slot and not atom.slot) and \
						not (arg_atom.repo and not atom.repo):
						discard_atoms.add(atom)
						break
		if discard_atoms:
			for atom in sorted(discard_atoms):

				if pretend:
					action_desc = "Would remove"
				else:
					action_desc = "Removing"

				if atom.startswith(SETPREFIX):
					filename = "world_sets"
				else:
					filename = "world"

				writemsg_stdout(
					">>> %s %s from \"%s\" favorites file...\n" %
					(action_desc, colorize("INFORM", _unicode(atom)),
					filename), noiselevel=-1)

			if '--ask' in opts:
				prompt = "Would you like to remove these " + \
					"packages from your world favorites?"
				uq = UserQuery(opts)
				if uq.query(prompt, enter_invalid) == 'No':
					return 128 + signal.SIGINT

			remaining = set(world_set)
			remaining.difference_update(discard_atoms)
			if not pretend:
				world_set.replace(remaining)
		else:
			print(">>> No matching atoms found in \"world\" favorites file...")
	finally:
		if locked:
			world_set.unlock()
	return os.EX_OK
class _info_pkgs_ver(object):
def __init__(self, ver, repo_suffix, provide_suffix):
self.ver = ver
self.repo_suffix = repo_suffix
self.provide_suffix = provide_suffix
def __lt__(self, other):
return portage.versions.vercmp(self.ver, other.ver) < 0
def toString(self):
"""
This may return unicode if repo_name contains unicode.
Don't use __str__ and str() since unicode triggers compatibility
issues between python 2.x and 3.x.
"""
return self.ver + self.repo_suffix + self.provide_suffix
def action_info(settings, trees, myopts, myfiles):
	"""Implement `emerge --info [atoms]`.

	Prints system configuration (portage version, uname, memory, repo
	timestamps, toolchain versions, selected config variables) and, for
	each package matching an atom in myfiles, its build-time settings,
	optionally running the ebuild's pkg_info() phase.

	Returns 1 when an atom matches nothing at all; otherwise returns
	None after printing.
	"""
	# See if we can find any packages installed matching the strings
	# passed on the command line
	mypkgs = []
	eroot = settings['EROOT']
	vardb = trees[eroot]["vartree"].dbapi
	portdb = trees[eroot]['porttree'].dbapi
	bindb = trees[eroot]["bintree"].dbapi
	repos = portdb.settings.repositories
	for x in myfiles:
		any_match = False
		cp_exists = bool(vardb.match(x.cp))
		installed_match = vardb.match(x)
		for installed in installed_match:
			mypkgs.append((installed, "installed"))
			any_match = True

		if any_match:
			continue

		for db, pkg_type in ((portdb, "ebuild"), (bindb, "binary")):
			if pkg_type == "binary" and "--usepkg" not in myopts:
				continue

			# Use match instead of cp_list, to account for old-style virtuals.
			if not cp_exists and db.match(x.cp):
				cp_exists = True
			# Search for masked packages too.
			if not cp_exists and hasattr(db, "xmatch") and \
				db.xmatch("match-all", x.cp):
				cp_exists = True

			matches = db.match(x)
			matches.reverse()
			for match in matches:
				if pkg_type == "binary":
					if db.bintree.isremote(match):
						continue
				auxkeys = ["EAPI", "DEFINED_PHASES"]
				metadata = dict(zip(auxkeys, db.aux_get(match, auxkeys)))
				# Take the newest match whose ebuild defines pkg_info();
				# EAPIs 0-3 are excluded by the version check here.
				if metadata["EAPI"] not in ("0", "1", "2", "3") and \
					"info" in metadata["DEFINED_PHASES"].split():
					mypkgs.append((match, pkg_type))
					break

		if not cp_exists:
			xinfo = '"%s"' % x.unevaluated_atom
			# Discard null/ from failed cpv_expand category expansion.
			xinfo = xinfo.replace("null/", "")
			if settings["ROOT"] != "/":
				xinfo = "%s for %s" % (xinfo, eroot)
			writemsg("\nemerge: there are no ebuilds to satisfy %s.\n" %
				colorize("INFORM", xinfo), noiselevel=-1)

			if myopts.get("--misspell-suggestions", "y") != "n":

				writemsg("\nemerge: searching for similar names..."
					, noiselevel=-1)

				search_index = myopts.get("--search-index", "y") != "n"
				dbs = [IndexedVardb(vardb) if search_index else vardb]
				#if "--usepkgonly" not in myopts:
				dbs.append(IndexedPortdb(portdb) if search_index else portdb)
				if "--usepkg" in myopts:
					dbs.append(bindb)

				matches = similar_name_search(dbs, x)

				if len(matches) == 1:
					writemsg("\nemerge: Maybe you meant " + matches[0] + "?\n"
						, noiselevel=-1)
				elif len(matches) > 1:
					writemsg(
						"\nemerge: Maybe you meant any of these: %s?\n" % \
						(", ".join(matches),), noiselevel=-1)
				else:
					# Generally, this would only happen if
					# all dbapis are empty.
					writemsg(" nothing similar found.\n"
						, noiselevel=-1)

			return 1

	# Report lines are accumulated here and flushed in batches via
	# writemsg_stdout below.
	output_buffer = []
	append = output_buffer.append
	root_config = trees[settings['EROOT']]['root_config']
	chost = settings.get("CHOST")

	append(getportageversion(settings["PORTDIR"], None,
		settings.profile_path, chost,
		trees[settings['EROOT']]["vartree"].dbapi))

	header_width = 65
	header_title = "System Settings"
	if myfiles:
		append(header_width * "=")
		append(header_title.rjust(int(header_width/2 + len(header_title)/2)))
		append(header_width * "=")
	append("System uname: %s" % (platform.platform(aliased=1),))

	# Memory/swap totals and free amounts, converted to KiB.
	vm_info = get_vm_info()
	if "ram.total" in vm_info:
		line = "%-9s %10d total" % ("KiB Mem:", vm_info["ram.total"] // 1024)
		if "ram.free" in vm_info:
			line += ",%10d free" % (vm_info["ram.free"] // 1024,)
		append(line)
	if "swap.total" in vm_info:
		line = "%-9s %10d total" % ("KiB Swap:", vm_info["swap.total"] // 1024)
		if "swap.free" in vm_info:
			line += ",%10d free" % (vm_info["swap.free"] // 1024,)
		append(line)

	# Per-repository sync timestamp and, when the sync module supports
	# retrieve_head(), the head commit.
	for repo in repos:
		last_sync = portage.grabfile(os.path.join(repo.location, "metadata", "timestamp.chk"))
		head_commit = None
		if last_sync:
			append("Timestamp of repository %s: %s" % (repo.name, last_sync[0]))
		if repo.sync_type:
			sync = portage.sync.module_controller.get_class(repo.sync_type)()
			options = { 'repo': repo }
			try:
				head_commit = sync.retrieve_head(options=options)
			except NotImplementedError:
				head_commit = (1, False)
		if head_commit and head_commit[0] == os.EX_OK:
			append("Head commit of repository %s: %s" % (repo.name, head_commit[1]))

	# Searching contents for the /bin/sh provider is somewhat
	# slow. Therefore, use the basename of the symlink target
	# to locate the package. If this fails, then only the
	# basename of the symlink target will be displayed. So,
	# typical output is something like "sh bash 4.2_p53". Since
	# realpath is used to resolve symlinks recursively, this
	# approach is also able to handle multiple levels of symlinks
	# such as /bin/sh -> bb -> busybox. Note that we do not parse
	# the output of "/bin/sh --version" because many shells
	# do not have a --version option.
	basename = os.path.basename(os.path.realpath(os.path.join(
		os.sep, portage.const.EPREFIX, "bin", "sh")))
	try:
		Atom("null/%s" % basename)
	except InvalidAtom:
		matches = None
	else:
		try:
			# Try a match against the basename, which should work for
			# busybox and most shells.
			matches = (trees[trees._running_eroot]["vartree"].dbapi.
				match(basename))
		except portage.exception.AmbiguousPackageName:
			# If the name is ambiguous, then restrict our match
			# to the app-shells category.
			matches = (trees[trees._running_eroot]["vartree"].dbapi.
				match("app-shells/%s" % basename))

	if matches:
		pkg = matches[-1]
		name = pkg.cp
		version = pkg.version
		# Omit app-shells category from the output.
		if name.startswith("app-shells/"):
			name = name[len("app-shells/"):]
		sh_str = "%s %s" % (name, version)
	else:
		sh_str = basename

	append("sh %s" % sh_str)

	# Report the linker version, preferring the CHOST-prefixed ld.
	ld_names = []
	if chost:
		ld_names.append(chost + "-ld")
	ld_names.append("ld")
	for name in ld_names:
		try:
			proc = subprocess.Popen([name, "--version"],
				stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
		except OSError:
			pass
		else:
			output = _unicode_decode(proc.communicate()[0]).splitlines()
			proc.wait()
			# NOTE(review): proc.wait() is called twice; harmless (the
			# second call returns the cached status) but redundant.
			if proc.wait() == os.EX_OK and output:
				append("ld %s" % (output[0]))
				break

	try:
		proc = subprocess.Popen(["distcc", "--version"],
			stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
	except OSError:
		output = (1, None)
	else:
		output = _unicode_decode(proc.communicate()[0]).rstrip("\n")
		output = (proc.wait(), output)

	if output[0] == os.EX_OK:
		distcc_str = output[1].split("\n", 1)[0]
		if "distcc" in settings.features:
			distcc_str += " [enabled]"
		else:
			distcc_str += " [disabled]"
		append(distcc_str)

	try:
		proc = subprocess.Popen(["ccache", "-V"],
			stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
	except OSError:
		output = (1, None)
	else:
		output = _unicode_decode(proc.communicate()[0]).rstrip("\n")
		output = (proc.wait(), output)

	if output[0] == os.EX_OK:
		ccache_str = output[1].split("\n", 1)[0]
		if "ccache" in settings.features:
			ccache_str += " [enabled]"
		else:
			ccache_str += " [disabled]"
		append(ccache_str)

	# Versions of toolchain-critical packages, plus any packages listed
	# in the profile's profiles/info_pkgs file.
	myvars = ["sys-devel/autoconf", "sys-devel/automake", "virtual/os-headers",
		"sys-devel/binutils", "sys-devel/libtool", "dev-lang/python"]
	myvars += portage.util.grabfile(settings["PORTDIR"]+"/profiles/info_pkgs")
	atoms = []
	for x in myvars:
		try:
			x = Atom(x)
		except InvalidAtom:
			append("%-20s %s" % (x+":", "[NOT VALID]"))
		else:
			for atom in expand_new_virt(vardb, x):
				if not atom.blocker:
					atoms.append((x, atom))

	myvars = sorted(set(atoms))

	cp_map = {}
	cp_max_len = 0

	for orig_atom, x in myvars:
		pkg_matches = vardb.match(x)

		versions = []
		for cpv in pkg_matches:
			matched_cp = portage.versions.cpv_getkey(cpv)
			ver = portage.versions.cpv_getversion(cpv)
			ver_map = cp_map.setdefault(matched_cp, {})
			prev_match = ver_map.get(ver)
			if prev_match is not None:
				if prev_match.provide_suffix:
					# prefer duplicate matches that include
					# additional virtual provider info
					continue

			if len(matched_cp) > cp_max_len:
				cp_max_len = len(matched_cp)
			repo = vardb.aux_get(cpv, ["repository"])[0]
			if repo:
				repo_suffix = _repo_separator + repo
			else:
				repo_suffix = _repo_separator + "<unknown repository>"

			if matched_cp == orig_atom.cp:
				provide_suffix = ""
			else:
				# This match was reached via a virtual; record which one.
				provide_suffix = " (%s)" % (orig_atom,)

			ver_map[ver] = _info_pkgs_ver(ver, repo_suffix, provide_suffix)

	for cp in sorted(cp_map):
		versions = sorted(cp_map[cp].values())
		versions = ", ".join(ver.toString() for ver in versions)
		append("%s %s" % \
			((cp + ":").ljust(cp_max_len + 1), versions))

	append("Repositories:\n")
	for repo in repos:
		append(repo.info_string())

	installed_sets = sorted(s for s in
		root_config.sets['selected'].getNonAtoms() if s.startswith(SETPREFIX))
	if installed_sets:
		sets_line = "Installed sets: "
		sets_line += ", ".join(installed_sets)
		append(sets_line)

	# Select which config variables to display: everything with
	# --verbose, otherwise a fixed list plus profiles/info_vars.
	if "--verbose" in myopts:
		myvars = list(settings)
	else:
		myvars = ['GENTOO_MIRRORS', 'CONFIG_PROTECT', 'CONFIG_PROTECT_MASK',
			'DISTDIR', 'PKGDIR', 'PORTAGE_TMPDIR',
			'PORTAGE_BUNZIP2_COMMAND',
			'PORTAGE_BZIP2_COMMAND',
			'USE', 'CHOST', 'CFLAGS', 'CXXFLAGS',
			'ACCEPT_KEYWORDS', 'ACCEPT_LICENSE', 'FEATURES',
			'EMERGE_DEFAULT_OPTS']

		myvars.extend(portage.util.grabfile(settings["PORTDIR"]+"/profiles/info_vars"))

	# Variables whose values are suppressed when equal to these defaults.
	myvars_ignore_defaults = {
		'PORTAGE_BZIP2_COMMAND' : 'bzip2',
	}

	skipped_vars = ['PORTAGE_REPOSITORIES']
	# Deprecated variables
	skipped_vars.extend(('PORTDIR', 'PORTDIR_OVERLAY', 'SYNC'))

	myvars = set(myvars)
	myvars.difference_update(skipped_vars)
	myvars = sorted(myvars)

	use_expand = settings.get('USE_EXPAND', '').split()
	use_expand.sort()
	unset_vars = []
	for k in myvars:
		v = settings.get(k)
		if v is not None:
			if k != "USE":
				default = myvars_ignore_defaults.get(k)
				if default is not None and \
					default == v:
					continue
				append('%s="%s"' % (k, v))
			else:
				# USE gets special treatment: strip USE_EXPAND-prefixed
				# flags out of USE and show them under their own
				# variable names instead.
				use = set(v.split())
				for varname in use_expand:
					flag_prefix = varname.lower() + "_"
					for f in list(use):
						if f.startswith(flag_prefix):
							use.remove(f)
				use = list(use)
				use.sort()
				use = ['USE="%s"' % " ".join(use)]
				for varname in use_expand:
					myval = settings.get(varname)
					if myval:
						use.append('%s="%s"' % (varname, myval))
				append(" ".join(use))
		else:
			unset_vars.append(k)

	if unset_vars:
		append("Unset: "+", ".join(unset_vars))
	append("")
	append("")
	writemsg_stdout("\n".join(output_buffer),
		noiselevel=-1)
	del output_buffer[:]

	# If some packages were found...
	if mypkgs:
		# Get our global settings (we only print stuff if it varies from
		# the current config)
		mydesiredvars = [ 'CHOST', 'CFLAGS', 'CXXFLAGS', 'LDFLAGS' ]
		auxkeys = mydesiredvars + list(vardb._aux_cache_keys)
		auxkeys.append('DEFINED_PHASES')
		pkgsettings = portage.config(clone=settings)

		# Loop through each package
		# Only print settings if they differ from global settings
		header_title = "Package Settings"
		append(header_width * "=")
		append(header_title.rjust(int(header_width/2 + len(header_title)/2)))
		append(header_width * "=")
		append("")
		writemsg_stdout("\n".join(output_buffer),
			noiselevel=-1)
		del output_buffer[:]

		out = portage.output.EOutput()
		for mypkg in mypkgs:
			cpv = mypkg[0]
			pkg_type = mypkg[1]
			# Get all package specific variables
			if pkg_type == "installed":
				metadata = dict(zip(auxkeys, vardb.aux_get(cpv, auxkeys)))
			elif pkg_type == "ebuild":
				metadata = dict(zip(auxkeys, portdb.aux_get(cpv, auxkeys)))
			elif pkg_type == "binary":
				metadata = dict(zip(auxkeys, bindb.aux_get(cpv, auxkeys)))

			pkg = Package(built=(pkg_type!="ebuild"), cpv=cpv,
				installed=(pkg_type=="installed"), metadata=zip(Package.metadata_keys,
				(metadata.get(x, '') for x in Package.metadata_keys)),
				root_config=root_config, type_name=pkg_type)

			if pkg_type == "installed":
				append("\n%s was built with the following:" % \
					colorize("INFORM", str(pkg.cpv + _repo_separator + pkg.repo)))
			elif pkg_type == "ebuild":
				append("\n%s would be built with the following:" % \
					colorize("INFORM", str(pkg.cpv + _repo_separator + pkg.repo)))
			elif pkg_type == "binary":
				append("\n%s (non-installed binary) was built with the following:" % \
					colorize("INFORM", str(pkg.cpv + _repo_separator + pkg.repo)))

			append('%s' % pkg_use_display(pkg, myopts))
			if pkg_type == "installed":
				for myvar in mydesiredvars:
					if metadata[myvar].split() != settings.get(myvar, '').split():
						append("%s=\"%s\"" % (myvar, metadata[myvar]))
			append("")
			append("")
			writemsg_stdout("\n".join(output_buffer),
				noiselevel=-1)
			del output_buffer[:]

			if metadata['DEFINED_PHASES']:
				if 'info' not in metadata['DEFINED_PHASES'].split():
					continue

			writemsg_stdout(">>> Attempting to run pkg_info() for '%s'\n"
				% pkg.cpv, noiselevel=-1)

			if pkg_type == "installed":
				ebuildpath = vardb.findname(pkg.cpv)
			elif pkg_type == "ebuild":
				ebuildpath = portdb.findname(pkg.cpv, myrepo=pkg.repo)
			elif pkg_type == "binary":
				# Extract the ebuild from the binary package into a
				# temporary directory so doebuild() can run it.
				tbz2_file = bindb.bintree.getname(pkg.cpv)
				ebuild_file_name = pkg.cpv.split("/")[1] + ".ebuild"
				ebuild_file_contents = portage.xpak.tbz2(tbz2_file).getfile(ebuild_file_name)
				tmpdir = tempfile.mkdtemp()
				ebuildpath = os.path.join(tmpdir, ebuild_file_name)
				file = open(ebuildpath, 'w')
				file.write(ebuild_file_contents)
				file.close()

			if not ebuildpath or not os.path.exists(ebuildpath):
				out.ewarn("No ebuild found for '%s'" % pkg.cpv)
				continue

			# NOTE(review): settings.get() returns a string, so the
			# `== 1` comparisons below are always False and debug mode
			# is effectively never enabled here; "1" may have been the
			# intended comparand — confirm before changing.
			if pkg_type == "installed":
				portage.doebuild(ebuildpath, "info", settings=pkgsettings,
					debug=(settings.get("PORTAGE_DEBUG", "") == 1),
					mydbapi=trees[settings['EROOT']]["vartree"].dbapi,
					tree="vartree")
			elif pkg_type == "ebuild":
				portage.doebuild(ebuildpath, "info", settings=pkgsettings,
					debug=(settings.get("PORTAGE_DEBUG", "") == 1),
					mydbapi=trees[settings['EROOT']]['porttree'].dbapi,
					tree="porttree")
			elif pkg_type == "binary":
				portage.doebuild(ebuildpath, "info", settings=pkgsettings,
					debug=(settings.get("PORTAGE_DEBUG", "") == 1),
					mydbapi=trees[settings['EROOT']]["bintree"].dbapi,
					tree="bintree")
				shutil.rmtree(tmpdir)
def action_regen(settings, portdb, max_jobs, max_load):
	"""Regenerate the ebuild metadata cache.

	Logs the operation, runs a MetadataRegen scheduler, and — if the
	scheduler was interrupted by a signal — exits the process with the
	shell convention of 128 + signum. Otherwise returns the scheduler's
	return code.
	"""
	show_titles = "notitles" not in settings.features
	emergelog(show_titles, " === regen")
	# Regenerate cache entries; flush stdout first so output stays ordered.
	sys.stdout.flush()
	regenerator = MetadataRegen(
		portdb, max_jobs=max_jobs, max_load=max_load, main=True)
	signum = run_main_scheduler(regenerator)
	if signum is not None:
		sys.exit(128 + signum)
	portage.writemsg_stdout("done!\n")
	return regenerator.returncode
def action_search(root_config, myopts, myfiles, spinner):
	"""Run `emerge --search` for every term in myfiles and print results.

	Exits the process with status 1 when a term is an invalid regular
	expression.
	"""
	if not myfiles:
		print("emerge: no search terms provided.")
	else:
		searcher = search(
			root_config,
			spinner,
			"--searchdesc" in myopts,
			"--quiet" not in myopts,
			"--usepkg" in myopts,
			"--usepkgonly" in myopts,
			search_index=myopts.get("--search-index", "y") != "n",
			search_similarity=myopts.get("--search-similarity"),
			fuzzy=myopts.get("--fuzzy-search") != "n",
		)
		for term in myfiles:
			try:
				searcher.execute(term)
			except re.error as comment:
				print("\n!!! Regular expression error in \"%s\": %s" % ( term, comment ))
				sys.exit(1)
			searcher.output()
def action_sync(emerge_config, trees=DeprecationWarning,
	mtimedb=DeprecationWarning, opts=DeprecationWarning,
	action=DeprecationWarning):
	"""Synchronize repositories.

	Syncs the repositories named in emerge_config.args, or every
	auto-sync repository when none are given. Returns os.EX_OK on
	success and 1 on failure.

	The trees/mtimedb/opts/action parameters exist only for the
	deprecated legacy call signature.
	"""
	if not isinstance(emerge_config, _emerge_config):
		# Legacy caller: rebuild the config object from the deprecated
		# positional arguments.
		warnings.warn("_emerge.actions.action_sync() now expects "
			"an _emerge_config instance as the first parameter",
			DeprecationWarning, stacklevel=2)
		emerge_config = load_emerge_config(
			action=action, args=[], trees=trees, opts=opts)

	syncer = SyncRepos(emerge_config)
	quiet = "--quiet" in emerge_config.opts
	options = {'return-messages': not quiet}
	if emerge_config.args:
		options['repo'] = emerge_config.args
		success, msgs = syncer.repo(options=options)
	else:
		success, msgs = syncer.auto_sync(options=options)
	if not quiet:
		print_results(msgs)

	return os.EX_OK if success else 1
def action_uninstall(settings, trees, ldpath_mtimes,
	opts, action, files, spinner):
	"""Handle the unmerge-family actions (clean, rage-clean, unmerge,
	prune, deselect).

	Each entry in `files` may be a package atom, an absolute file path
	(resolved to its owning package), a set name (for deselect), or a
	wildcard atom. Invalid input produces an error message and return
	value 1; otherwise the resolved atoms are handed to unmerge(),
	action_depclean() or action_deselect() and that result is returned.
	"""
	# For backward compat, some actions do not require leading '='.
	ignore_missing_eq = action in ('clean', 'rage-clean', 'unmerge')
	root = settings['ROOT']
	eroot = settings['EROOT']
	vardb = trees[settings['EROOT']]['vartree'].dbapi
	valid_atoms = []
	lookup_owners = []

	# Ensure atoms are valid before calling unmerge().
	# For backward compat, leading '=' is not required.
	for x in files:
		if is_valid_package_atom(x, allow_repo=True) or \
			(ignore_missing_eq and is_valid_package_atom('=' + x)):

			try:
				atom = dep_expand(x, mydb=vardb, settings=settings)
			except portage.exception.AmbiguousPackageName as e:
				msg = "The short ebuild name \"" + x + \
					"\" is ambiguous. Please specify " + \
					"one of the following " + \
					"fully-qualified ebuild names instead:"
				for line in textwrap.wrap(msg, 70):
					writemsg_level("!!! %s\n" % (line,),
						level=logging.ERROR, noiselevel=-1)
				# e.args[0] holds the candidate fully-qualified names.
				for i in e.args[0]:
					writemsg_level(" %s\n" % colorize("INFORM", i),
						level=logging.ERROR, noiselevel=-1)
				writemsg_level("\n", level=logging.ERROR, noiselevel=-1)
				return 1
			else:
				# Conditional USE deps make no sense for uninstall.
				if atom.use and atom.use.conditional:
					writemsg_level(
						("\n\n!!! '%s' contains a conditional " + \
						"which is not allowed.\n") % (x,),
						level=logging.ERROR, noiselevel=-1)
					writemsg_level(
						"!!! Please check ebuild(5) for full details.\n",
						level=logging.ERROR)
					return 1
				valid_atoms.append(atom)

		elif x.startswith(os.sep):
			# Absolute path: must live under EROOT, then resolved to
			# its owning package below.
			if not x.startswith(eroot):
				writemsg_level(("!!! '%s' does not start with" + \
					" $EROOT.\n") % x, level=logging.ERROR, noiselevel=-1)
				return 1
			# Queue these up since it's most efficient to handle
			# multiple files in a single iter_owners() call.
			lookup_owners.append(x)

		elif x.startswith(SETPREFIX) and action == "deselect":
			valid_atoms.append(x)

		elif "*" in x:
			# Wildcard atom: expand against every installed package.
			try:
				ext_atom = Atom(x, allow_repo=True, allow_wildcard=True)
			except InvalidAtom:
				msg = []
				msg.append("'%s' is not a valid package atom." % (x,))
				msg.append("Please check ebuild(5) for full details.")
				writemsg_level("".join("!!! %s\n" % line for line in msg),
					level=logging.ERROR, noiselevel=-1)
				return 1

			for cpv in vardb.cpv_all():
				if portage.match_from_list(ext_atom, [cpv]):
					require_metadata = False
					atom = portage.cpv_getkey(cpv)
					if ext_atom.operator == '=*':
						atom = "=" + atom + "-" + \
							portage.versions.cpv_getversion(cpv)
					if ext_atom.slot:
						atom += _slot_separator + ext_atom.slot
						require_metadata = True
					if ext_atom.repo:
						atom += _repo_separator + ext_atom.repo
						require_metadata = True

					atom = Atom(atom, allow_repo=True)
					if require_metadata:
						# Re-check the match with slot/repo metadata
						# attached to the cpv.
						try:
							cpv = vardb._pkg_str(cpv, ext_atom.repo)
						except (KeyError, InvalidData):
							continue
						if not portage.match_from_list(atom, [cpv]):
							continue

					valid_atoms.append(atom)

		else:
			msg = []
			msg.append("'%s' is not a valid package atom." % (x,))
			msg.append("Please check ebuild(5) for full details.")
			writemsg_level("".join("!!! %s\n" % line for line in msg),
				level=logging.ERROR, noiselevel=-1)
			return 1

	if lookup_owners:
		relative_paths = []
		search_for_multiple = False
		if len(lookup_owners) > 1:
			search_for_multiple = True

		for x in lookup_owners:
			# A directory may be owned by multiple packages, so keep
			# searching beyond the first owner in that case.
			if not search_for_multiple and os.path.isdir(x):
				search_for_multiple = True
			relative_paths.append(x[len(root)-1:])

		owners = set()
		for pkg, relative_path in \
			vardb._owners.iter_owners(relative_paths):
			owners.add(pkg.mycpv)
			if not search_for_multiple:
				break

		if owners:
			for cpv in owners:
				pkg = vardb._pkg_str(cpv, None)
				atom = '%s:%s' % (pkg.cp, pkg.slot)
				valid_atoms.append(portage.dep.Atom(atom))
		else:
			writemsg_level(("!!! '%s' is not claimed " + \
				"by any package.\n") % lookup_owners[0],
				level=logging.WARNING, noiselevel=-1)

	if files and not valid_atoms:
		return 1

	if action == 'unmerge' and \
		'--quiet' not in opts and \
		'--quiet-unmerge-warn' not in opts:
		msg = "This action can remove important packages! " + \
			"In order to be safer, use " + \
			"`emerge -pv --depclean <atom>` to check for " + \
			"reverse dependencies before removing packages."
		out = portage.output.EOutput()
		for line in textwrap.wrap(msg, 72):
			out.ewarn(line)

	if action == 'deselect':
		return action_deselect(settings, trees, opts, valid_atoms)

	# Use the same logic as the Scheduler class to trigger redirection
	# of ebuild pkg_prerm/postrm phase output to logs as appropriate
	# for options such as --jobs, --quiet and --quiet-build.
	max_jobs = opts.get("--jobs", 1)
	background = (max_jobs is True or max_jobs > 1 or
		"--quiet" in opts or opts.get("--quiet-build") == "y")
	sched_iface = SchedulerInterface(global_event_loop(),
		is_background=lambda: background)

	if background:
		settings.unlock()
		settings["PORTAGE_BACKGROUND"] = "1"
		settings.backup_changes("PORTAGE_BACKGROUND")
		settings.lock()

	if action in ('clean', 'rage-clean', 'unmerge') or \
		(action == 'prune' and "--nodeps" in opts):
		# When given a list of atoms, unmerge them in the order given.
		ordered = action in ('rage-clean', 'unmerge')
		rval = unmerge(trees[settings['EROOT']]['root_config'], opts, action,
			valid_atoms, ldpath_mtimes, ordered=ordered,
			scheduler=sched_iface)
	else:
		rval = action_depclean(settings, trees, ldpath_mtimes,
			opts, action, valid_atoms, spinner,
			scheduler=sched_iface)

	return rval
def adjust_configs(myopts, trees):
	"""Apply emerge-specific config adjustments to every root in trees."""
	for root in trees:
		cfg = trees[root]["vartree"].settings
		cfg.unlock()
		adjust_config(myopts, cfg)
		cfg.lock()
def adjust_config(myopts, settings):
	"""Make emerge specific adjustments to the config.

	Mutates `settings` in place based on command-line options in
	`myopts`: sanitizes FEATURES, normalizes the numeric delay/debug
	variables, and configures quiet/verbose/color/binpkg behavior.
	Assumes the caller has already unlocked `settings`.
	"""

	# Kill noauto as it will break merges otherwise.
	if "noauto" in settings.features:
		settings.features.remove('noauto')

	# --fail-clean is a tri-state: True adds the feature, 'n' removes it,
	# None leaves FEATURES untouched.
	fail_clean = myopts.get('--fail-clean')
	if fail_clean is not None:
		if fail_clean is True and \
			'fail-clean' not in settings.features:
			settings.features.add('fail-clean')
		elif fail_clean == 'n' and \
			'fail-clean' in settings.features:
			settings.features.remove('fail-clean')

	# Normalize CLEAN_DELAY to a valid integer string, warning (and
	# falling back to the default) on parse failure.
	CLEAN_DELAY = 5
	try:
		CLEAN_DELAY = int(settings.get("CLEAN_DELAY", str(CLEAN_DELAY)))
	except ValueError as e:
		portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
		portage.writemsg("!!! Unable to parse integer: CLEAN_DELAY='%s'\n" % \
			settings["CLEAN_DELAY"], noiselevel=-1)
	settings["CLEAN_DELAY"] = str(CLEAN_DELAY)
	settings.backup_changes("CLEAN_DELAY")

	# Same normalization for EMERGE_WARNING_DELAY.
	EMERGE_WARNING_DELAY = 10
	try:
		EMERGE_WARNING_DELAY = int(settings.get(
			"EMERGE_WARNING_DELAY", str(EMERGE_WARNING_DELAY)))
	except ValueError as e:
		portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
		portage.writemsg("!!! Unable to parse integer: EMERGE_WARNING_DELAY='%s'\n" % \
			settings["EMERGE_WARNING_DELAY"], noiselevel=-1)
	settings["EMERGE_WARNING_DELAY"] = str(EMERGE_WARNING_DELAY)
	settings.backup_changes("EMERGE_WARNING_DELAY")

	# --buildpkg is also tri-state (True / 'n' / None).
	buildpkg = myopts.get("--buildpkg")
	if buildpkg is True:
		settings.features.add("buildpkg")
	elif buildpkg == 'n':
		settings.features.discard("buildpkg")

	if "--quiet" in myopts:
		settings["PORTAGE_QUIET"]="1"
		settings.backup_changes("PORTAGE_QUIET")

	if "--verbose" in myopts:
		settings["PORTAGE_VERBOSE"] = "1"
		settings.backup_changes("PORTAGE_VERBOSE")

	# Set so that configs will be merged regardless of remembered status
	if ("--noconfmem" in myopts):
		settings["NOCONFMEM"]="1"
		settings.backup_changes("NOCONFMEM")

	# Set various debug markers... They should be merged somehow.
	# PORTAGE_DEBUG must parse to 0 or 1; anything else is rejected
	# with a warning and reset to 0.
	PORTAGE_DEBUG = 0
	try:
		PORTAGE_DEBUG = int(settings.get("PORTAGE_DEBUG", str(PORTAGE_DEBUG)))
		if PORTAGE_DEBUG not in (0, 1):
			portage.writemsg("!!! Invalid value: PORTAGE_DEBUG='%i'\n" % \
				PORTAGE_DEBUG, noiselevel=-1)
			portage.writemsg("!!! PORTAGE_DEBUG must be either 0 or 1\n",
				noiselevel=-1)
			PORTAGE_DEBUG = 0
	except ValueError as e:
		portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
		portage.writemsg("!!! Unable to parse integer: PORTAGE_DEBUG='%s'\n" %\
			settings["PORTAGE_DEBUG"], noiselevel=-1)
		del e

	# --debug on the command line overrides the config value.
	if "--debug" in myopts:
		PORTAGE_DEBUG = 1

	settings["PORTAGE_DEBUG"] = str(PORTAGE_DEBUG)
	settings.backup_changes("PORTAGE_DEBUG")

	if settings.get("NOCOLOR") not in ("yes","true"):
		portage.output.havecolor = 1

	# The explicit --color < y | n > option overrides the NOCOLOR environment
	# variable and stdout auto-detection.
	if "--color" in myopts:
		if "y" == myopts["--color"]:
			portage.output.havecolor = 1
			settings["NOCOLOR"] = "false"
		else:
			portage.output.havecolor = 0
			settings["NOCOLOR"] = "true"
		settings.backup_changes("NOCOLOR")
	elif settings.get('TERM') == 'dumb' or \
		not sys.stdout.isatty():
		# Auto-disable color on dumb terminals or non-tty stdout.
		portage.output.havecolor = 0
		settings["NOCOLOR"] = "true"
		settings.backup_changes("NOCOLOR")

	if "--pkg-format" in myopts:
		settings["PORTAGE_BINPKG_FORMAT"] = myopts["--pkg-format"]
		settings.backup_changes("PORTAGE_BINPKG_FORMAT")
def display_missing_pkg_set(root_config, set_name):
	"""Log an error stating that set_name does not exist, followed by the
	names of the sets that are actually configured."""
	lines = [
		("emerge: There are no sets to satisfy '%s'. " + \
		"The following sets exist:") % \
		colorize("INFORM", set_name),
		"",
	]
	lines.extend(" %s" % s for s in sorted(root_config.sets))
	lines.append("")
	writemsg_level("".join("%s\n" % l for l in lines),
		level=logging.ERROR, noiselevel=-1)
def relative_profile_path(portdir, abs_profile):
	"""Return abs_profile expressed relative to portdir's "profiles"
	directory, or None when it does not live under that directory.

	Both paths are resolved with os.path.realpath so symlinked profile
	layouts compare correctly. Returns "" when abs_profile *is* the
	profiles directory itself (preserving historical behavior).
	"""
	realpath = os.path.realpath(abs_profile)
	basepath = os.path.realpath(os.path.join(portdir, "profiles"))
	if realpath == basepath:
		# The profiles directory itself; historical result is "".
		return ""
	# Require a path-separator boundary so that sibling directories such
	# as "profiles-backup" are not mistaken for children of "profiles"
	# (a bare startswith(basepath) check would match them).
	if realpath.startswith(basepath + os.sep):
		return realpath[1 + len(basepath):]
	return None
def getportageversion(portdir, _unused, profile, chost, vardb):
	"""Build the one-line "Portage VERSION (python, profile, gcc, libc,
	uname)" banner shown at the top of `emerge --info`.

	`_unused` is kept for call-signature compatibility. Falls back to
	"unavailable" for any component that cannot be determined.
	"""
	pythonver = 'python %d.%d.%d-%s-%d' % sys.version_info[:]
	profilever = None
	repositories = vardb.settings.repositories
	if profile:
		profilever = relative_profile_path(portdir, profile)
		if profilever is None:
			# The profile is not under portdir/profiles; try to resolve
			# it through its "parent" file entries, which may reference
			# other repositories via a "repo:path" syntax.
			try:
				for parent in portage.grabfile(
					os.path.join(profile, 'parent')):
					profilever = relative_profile_path(portdir,
						os.path.join(profile, parent))
					if profilever is not None:
						break
					colon = parent.find(":")
					if colon != -1:
						p_repo_name = parent[:colon]
						try:
							p_repo_loc = \
								repositories.get_location_for_name(p_repo_name)
						except KeyError:
							pass
						else:
							profilever = relative_profile_path(p_repo_loc,
								os.path.join(p_repo_loc, 'profiles',
								parent[colon+1:]))
							if profilever is not None:
								break
			except portage.exception.PortageException:
				pass

			if profilever is None:
				# Last resort: show the raw symlink target, prefixed
				# with "!" to mark it as unresolved.
				try:
					profilever = "!" + os.readlink(profile)
				except (OSError):
					pass

	if profilever is None:
		profilever = "unavailable"

	# Collect installed libc versions via the LIBC virtual.
	libcver = []
	libclist = set()
	for atom in expand_new_virt(vardb, portage.const.LIBC_PACKAGE_ATOM):
		if not atom.blocker:
			libclist.update(vardb.match(atom))
	if libclist:
		for cpv in sorted(libclist):
			libc_split = portage.catpkgsplit(cpv)[1:]
			# Suppress the default -r0 revision suffix.
			if libc_split[-1] == "r0":
				libc_split = libc_split[:-1]
			libcver.append("-".join(libc_split))
	else:
		libcver = ["unavailable"]

	gccver = getgccversion(chost)
	unameout=platform.release()+" "+platform.machine()

	return "Portage %s (%s, %s, %s, %s, %s)" % \
		(portage.VERSION, pythonver, profilever, gccver, ",".join(libcver), unameout)
class _emerge_config(SlotObject):
	"""Bundle of emerge invocation state: action, args, opts, the
	running/target RootConfig instances and the trees mapping.

	Also iterates and indexes as the legacy (settings, trees, mtimedb)
	triple, so old callers of load_emerge_config() that unpack a tuple
	keep working.
	"""

	__slots__ = ('action', 'args', 'opts',
		'running_config', 'target_config', 'trees')

	def __iter__(self):
		# Legacy tuple order: (settings, trees, mtimedb).
		yield self.target_config.settings
		yield self.trees
		yield self.target_config.mtimedb

	def __getitem__(self, index):
		return list(self)[index]

	def __len__(self):
		return 3
def load_emerge_config(emerge_config=None, **kargs):
	"""Create or refresh an _emerge_config from the environment.

	Builds (or rebuilds) the portage trees honoring the
	PORTAGE_CONFIGROOT/ROOT/EPREFIX environment variables, attaches a
	RootConfig and mtimedb to each root, and wires up the target and
	running configs. Returns the populated _emerge_config instance.
	"""

	if emerge_config is None:
		emerge_config = _emerge_config(**kargs)

	# Only pass environment overrides through when they are non-empty.
	kwargs = {}
	for k, envvar in (("config_root", "PORTAGE_CONFIGROOT"), ("target_root", "ROOT"),
		("eprefix", "EPREFIX")):
		v = os.environ.get(envvar, None)
		if v and v.strip():
			kwargs[k] = v
	emerge_config.trees = portage.create_trees(trees=emerge_config.trees,
		**kwargs)

	for root_trees in emerge_config.trees.values():
		settings = root_trees["vartree"].settings
		settings._init_dirs()
		setconfig = load_default_config(settings, root_trees)
		root_config = RootConfig(settings, root_trees, setconfig)
		if "root_config" in root_trees:
			# Propagate changes to the existing instance,
			# which may be referenced by a depgraph.
			root_trees["root_config"].update(root_config)
		else:
			root_trees["root_config"] = root_config

	target_eroot = emerge_config.trees._target_eroot
	emerge_config.target_config = \
		emerge_config.trees[target_eroot]['root_config']
	emerge_config.target_config.mtimedb = portage.MtimeDB(
		os.path.join(target_eroot, portage.CACHE_PATH, "mtimedb"))
	emerge_config.running_config = emerge_config.trees[
		emerge_config.trees._running_eroot]['root_config']
	# QueryCommand services portageq requests from ebuild helpers.
	QueryCommand._db = emerge_config.trees

	return emerge_config
def getgccversion(chost=None):
	"""
	rtype: C{str}
	return: the current in-use gcc version

	Tries, in order: `gcc-config -c` (when chost is given),
	`<chost>-gcc -dumpversion`, then plain `gcc -dumpversion`.
	Returns "[unavailable]" (after printing a warning) when no
	compiler can be found.
	"""

	gcc_ver_command = ['gcc', '-dumpversion']
	gcc_ver_prefix = 'gcc-'

	gcc_not_found_error = red(
		"!!! No gcc found. You probably need to 'source /etc/profile'\n" +
		"!!! to update the environment of this terminal and possibly\n" +
		"!!! other terminals also.\n"
	)

	def _run(cmd):
		# Run cmd capturing stdout+stderr; return (status, decoded
		# output) or (1, None) when the executable is missing.
		try:
			proc = subprocess.Popen(cmd,
				stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
		except OSError:
			return 1, None
		myoutput = _unicode_decode(proc.communicate()[0]).rstrip("\n")
		return proc.wait(), myoutput

	if chost:
		# Prefer gcc-config, which reports the active compiler profile.
		mystatus, myoutput = _run(["gcc-config", "-c"])
		if mystatus == os.EX_OK and myoutput.startswith(chost + "-"):
			return myoutput.replace(chost + "-", gcc_ver_prefix, 1)

		# Fall back to the CHOST-prefixed compiler binary.
		mystatus, myoutput = _run(
			[chost + "-" + gcc_ver_command[0]] + gcc_ver_command[1:])
		if mystatus == os.EX_OK:
			return gcc_ver_prefix + myoutput

	# Last resort: plain "gcc" from PATH.
	mystatus, myoutput = _run(gcc_ver_command)
	if mystatus == os.EX_OK:
		return gcc_ver_prefix + myoutput

	portage.writemsg(gcc_not_found_error, noiselevel=-1)
	return "[unavailable]"
# Warn about features that may confuse users and
# lead them to report invalid bugs.
# Checked by validate_ebuild_environment() against each root's FEATURES.
_emerge_features_warn = frozenset(['keeptemp', 'keepwork'])
def validate_ebuild_environment(trees):
	"""Validate every root's settings, warn when FEATURES contains values
	that commonly cause bogus bug reports, and check the locale."""
	risky = set()
	for root in trees:
		cfg = trees[root]["vartree"].settings
		cfg.validate()
		risky |= _emerge_features_warn & set(cfg.features)

	if risky:
		warning = "WARNING: The FEATURES variable contains one " + \
			"or more values that should be disabled under " + \
			"normal circumstances: %s" % " ".join(risky)
		out = portage.output.EOutput()
		for line in textwrap.wrap(warning, 65):
			out.ewarn(line)

	check_locale()
def check_procfs():
	"""Return os.EX_OK unless this is Linux with /proc unmounted, in
	which case a warning is logged and 1 is returned."""
	procfs_path = '/proc'
	needs_procfs = platform.system() in ("Linux",)
	if not needs_procfs or os.path.ismount(procfs_path):
		return os.EX_OK
	msg = "It seems that %s is not mounted. You have been warned." % procfs_path
	formatted = "".join("!!! %s\n" % l for l in textwrap.wrap(msg, 70))
	writemsg_level(formatted, level=logging.ERROR, noiselevel=-1)
	return 1
def config_protect_check(trees):
	"""Emit a warning for every root whose CONFIG_PROTECT is empty."""
	for root, root_trees in trees.items():
		settings = root_trees["root_config"].settings
		if settings.get("CONFIG_PROTECT"):
			continue
		msg = "!!! CONFIG_PROTECT is empty"
		# Only name the root when it is not the default "/".
		if settings["ROOT"] != "/":
			msg += " for '%s'" % root
		msg += "\n"
		writemsg_level(msg, level=logging.WARN, noiselevel=-1)
def apply_priorities(settings):
	"""Apply both I/O and CPU scheduling priorities from settings."""
	for apply_priority in (ionice, nice):
		apply_priority(settings)
def nice(settings):
	"""Renice the current process by PORTAGE_NICENESS (default "0"),
	reporting — but not raising — any failure."""
	niceness = settings.get("PORTAGE_NICENESS", "0")
	try:
		os.nice(int(niceness))
	except (OSError, ValueError) as e:
		out = portage.output.EOutput()
		out.eerror("Failed to change nice value to '%s'" % \
			settings.get("PORTAGE_NICENESS", "0"))
		out.eerror("%s\n" % str(e))
def ionice(settings):
	"""Run PORTAGE_IONICE_COMMAND (with ${PID} expanded) if configured,
	reporting a non-zero exit status via EOutput."""
	raw_cmd = settings.get("PORTAGE_IONICE_COMMAND")
	if raw_cmd:
		raw_cmd = portage.util.shlex_split(raw_cmd)
	if not raw_cmd:
		return

	variables = {"PID" : str(os.getpid())}
	cmd = [varexpand(token, mydict=variables) for token in raw_cmd]

	try:
		rval = portage.process.spawn(cmd, env=os.environ)
	except portage.exception.CommandNotFound:
		# The OS kernel probably doesn't support ionice,
		# so return silently.
		return

	if rval != os.EX_OK:
		out = portage.output.EOutput()
		out.eerror("PORTAGE_IONICE_COMMAND returned %d" % (rval,))
		out.eerror("See the make.conf(5) man page for PORTAGE_IONICE_COMMAND usage instructions.")
def setconfig_fallback(root_config):
	"""Rebuild root_config.sets from the built-in default set config."""
	sc = root_config.setconfig
	sc._create_default_config()
	sc._parse(update=True)
	root_config.sets = sc.getSets()
def get_missing_sets(root_config):
	"""Return the names of the required sets ("selected", "system",
	"world") that are absent from root_config.sets; emerge requires
	all three to exist."""
	required = ("selected", "system", "world",)
	return [name for name in required if name not in root_config.sets]
def missing_sets_warning(root_config, missing_sets):
	"""Log an error describing the required package sets missing from the
	configuration, pointing at the likely broken sets/portage.conf."""
	count = len(missing_sets)
	if count > 2:
		names = ", ".join('"%s"' % s for s in missing_sets[:-1])
		names += ', and "%s"' % missing_sets[-1]
	elif count == 2:
		names = '"%s" and "%s"' % tuple(missing_sets)
	else:
		names = '"%s"' % missing_sets[-1]

	msg = ["emerge: incomplete set configuration, " + \
		"missing set(s): %s" % names]
	if root_config.sets:
		msg.append(" sets defined: %s" % ", ".join(root_config.sets))

	# Point at the global sets config, honoring EPREFIX when set.
	global_config_path = portage.const.GLOBAL_CONFIG_PATH
	if portage.const.EPREFIX:
		global_config_path = os.path.join(portage.const.EPREFIX,
			portage.const.GLOBAL_CONFIG_PATH.lstrip(os.sep))
	msg.append(" This usually means that '%s'" % \
		(os.path.join(global_config_path, "sets/portage.conf"),))
	msg.append(" is missing or corrupt.")
	msg.append(" Falling back to default world and system set configuration!!!")

	for line in msg:
		writemsg_level(line + "\n", level=logging.ERROR, noiselevel=-1)
def ensure_required_sets(trees):
	"""For each root, install the fallback default set configuration when
	required sets are missing, warning once across all roots."""
	warned = False
	for root_trees in trees.values():
		missing = get_missing_sets(root_trees["root_config"])
		if missing:
			if not warned:
				warned = True
				missing_sets_warning(root_trees["root_config"], missing)
			setconfig_fallback(root_trees["root_config"])
def expand_set_arguments(myfiles, myaction, root_config):
	"""Expand @set arguments in myfiles into their member atoms.

	Handles the bare "system"/"world" shorthand, parses inline set
	options of the form @name{key=val,...}, and (unless myaction is
	None, where the depgraph expands sets itself) replaces each set
	argument with its atoms.

	Returns (newargs, retval) where retval is os.EX_OK or 1; returns
	(None, 1) on a fatal error such as a missing set.
	"""
	retval = os.EX_OK
	setconfig = root_config.setconfig

	sets = setconfig.getSets()

	# In order to know exactly which atoms/sets should be added to the
	# world file, the depgraph performs set expansion later. It will get
	# confused about where the atoms came from if it's not allowed to
	# expand them itself.
	do_not_expand = myaction is None
	newargs = []
	for a in myfiles:
		if a in ("system", "world"):
			newargs.append(SETPREFIX+a)
		else:
			newargs.append(a)
	myfiles = newargs
	del newargs
	newargs = []

	# separators for set arguments
	ARG_START = "{"
	ARG_END = "}"

	# Parse inline set options: @name{k=v,flag} applies {k: v,
	# flag: "True"} to the named set's configuration and strips the
	# braces from the argument.
	for i in range(0, len(myfiles)):
		if myfiles[i].startswith(SETPREFIX):
			start = 0
			end = 0
			x = myfiles[i][len(SETPREFIX):]
			newset = ""
			while x:
				start = x.find(ARG_START)
				end = x.find(ARG_END)
				if start > 0 and start < end:
					namepart = x[:start]
					argpart = x[start+1:end]

					# TODO: implement proper quoting
					args = argpart.split(",")
					options = {}
					for a in args:
						if "=" in a:
							k, v = a.split("=", 1)
							options[k] = v
						else:
							options[a] = "True"
					setconfig.update(namepart, options)
					newset += (x[:start-len(namepart)]+namepart)
					x = x[end+len(ARG_END):]
				else:
					newset += x
					x = ""
			myfiles[i] = SETPREFIX+newset

	# Re-read: inline options above may have altered the set config.
	sets = setconfig.getSets()

	# display errors that occurred while loading the SetConfig instance
	for e in setconfig.errors:
		print(colorize("BAD", "Error during set creation: %s" % e))

	unmerge_actions = ("unmerge", "prune", "clean", "depclean", "rage-clean")

	for a in myfiles:
		if a.startswith(SETPREFIX):
			s = a[len(SETPREFIX):]
			if s not in sets:
				display_missing_pkg_set(root_config, s)
				return (None, 1)
			if s == "installed":
				msg = ("The @installed set is not recommended when "
					"updating packages because it will often "
					"introduce unsolved blocker conflicts. Please "
					"refer to bug #387059 for details.")
				out = portage.output.EOutput()
				for line in textwrap.wrap(msg, 57):
					out.ewarn(line)
			setconfig.active.append(s)

			if do_not_expand:
				# Loading sets can be slow, so skip it here, in order
				# to allow the depgraph to indicate progress with the
				# spinner while sets are loading (bug #461412).
				newargs.append(a)
				continue

			try:
				set_atoms = setconfig.getSetAtoms(s)
			except portage.exception.PackageSetNotFound as e:
				writemsg_level(("emerge: the given set '%s' " + \
					"contains a non-existent set named '%s'.\n") % \
					(s, e), level=logging.ERROR, noiselevel=-1)
				if s in ('world', 'selected') and \
					SETPREFIX + e.value in sets['selected']:
					writemsg_level(("Use `emerge --deselect %s%s` to "
						"remove this set from world_sets.\n") %
						(SETPREFIX, e,), level=logging.ERROR,
						noiselevel=-1)
				return (None, 1)
			if myaction in unmerge_actions and \
				not sets[s].supportsOperation("unmerge"):
				writemsg_level("emerge: the given set '%s' does " % s + \
					"not support unmerge operations\n",
					level=logging.ERROR, noiselevel=-1)
				retval = 1
			elif not set_atoms:
				writemsg_level("emerge: '%s' is an empty set\n" % s,
					level=logging.INFO, noiselevel=-1)
			else:
				newargs.extend(set_atoms)
			for error_msg in sets[s].errors:
				writemsg_level("%s\n" % (error_msg,),
					level=logging.ERROR, noiselevel=-1)
		else:
			newargs.append(a)
	return (newargs, retval)
def repo_name_check(trees):
    """
    Scan every porttree for repositories that lack a profiles/repo_name
    entry and emit a single WARNING listing them.

    Returns True when at least one repository is missing its repo_name.
    """
    missing = set()
    for root_trees in trees.values():
        porttree = root_trees.get("porttree")
        if porttree:
            portdb = porttree.dbapi
            missing.update(portdb.getMissingRepoNames())
            # Skip warnings about missing repo_name entries for
            # /usr/local/portage (see bug #248603).
            missing.discard('/usr/local/portage')

    if missing:
        lines = ["WARNING: One or more repositories " + \
            "have missing repo_name entries:", ""]
        lines.extend("\t%s/profiles/repo_name" % (p,) for p in missing)
        lines.append("")
        lines.extend(textwrap.wrap("NOTE: Each repo_name entry " + \
            "should be a plain text file containing a unique " + \
            "name for the repository on the first line.", 70))
        lines.append("\n")
        writemsg_level("".join("%s\n" % line for line in lines),
            level=logging.WARNING, noiselevel=-1)

    return bool(missing)
def repo_name_duplicate_check(trees):
    """
    Warn about repositories that were ignored because their
    profiles/repo_name duplicates that of another repository.

    Returns True when at least one repository was ignored.
    """
    ignored = {}
    for root, root_trees in trees.items():
        if 'porttree' not in root_trees:
            continue
        portdb = root_trees['porttree'].dbapi
        # PORTAGE_REPO_DUPLICATE_WARN="0" disables this warning entirely.
        if portdb.settings.get('PORTAGE_REPO_DUPLICATE_WARN') == '0':
            continue
        for repo_name, paths in portdb.getIgnoredRepos():
            key = (root, repo_name, portdb.getRepositoryPath(repo_name))
            ignored.setdefault(key, []).extend(paths)

    if ignored:
        lines = ['WARNING: One or more repositories ' + \
            'have been ignored due to duplicate',
            ' profiles/repo_name entries:', '']
        for key in sorted(ignored):
            lines.append(' %s overrides' % ", ".join(key))
            lines.extend(' %s' % (path,) for path in ignored[key])
            lines.append('')
        lines.extend(' ' + x for x in textwrap.wrap(
            "All profiles/repo_name entries must be unique in order " + \
            "to avoid having duplicates ignored. " + \
            "Set PORTAGE_REPO_DUPLICATE_WARN=\"0\" in " + \
            "/etc/portage/make.conf if you would like to disable this warning."))
        lines.append("\n")
        writemsg_level(''.join('%s\n' % line for line in lines),
            level=logging.WARNING, noiselevel=-1)

    return bool(ignored)
def run_action(emerge_config):
    """
    Execute the action requested in emerge_config (version, help, sync,
    metadata, regen, config, search, the unmerge family, info, or the
    default build action) and return a shell-style exit code.

    NOTE(review): depends on many module-level helpers (action_*,
    load_emerge_config, writemsg_level, ...) defined elsewhere in this file.
    Option normalization and permission checks happen before dispatch, so
    statement order here is significant.
    """
    # skip global updates prior to sync, since it's called after sync
    if emerge_config.action not in ('help', 'info', 'sync', 'version') and \
            emerge_config.opts.get('--package-moves') != 'n' and \
            _global_updates(emerge_config.trees,
            emerge_config.target_config.mtimedb["updates"],
            quiet=("--quiet" in emerge_config.opts)):
        emerge_config.target_config.mtimedb.commit()
        # Reload the whole config from scratch.
        load_emerge_config(emerge_config=emerge_config)

    xterm_titles = "notitles" not in \
        emerge_config.target_config.settings.features
    if xterm_titles:
        xtermTitle("emerge")

    if "--digest" in emerge_config.opts:
        os.environ["FEATURES"] = os.environ.get("FEATURES","") + " digest"
        # Reload the whole config from scratch so that the portdbapi internal
        # config is updated with new FEATURES.
        load_emerge_config(emerge_config=emerge_config)

    # NOTE: adjust_configs() can map options to FEATURES, so any relevant
    # options adjustments should be made prior to calling adjust_configs().
    # The following chain expands option implications, e.g. --getbinpkgonly
    # implies --getbinpkg and --usepkgonly, which in turn imply --usepkg.
    if "--buildpkgonly" in emerge_config.opts:
        emerge_config.opts["--buildpkg"] = True

    if "getbinpkg" in emerge_config.target_config.settings.features:
        emerge_config.opts["--getbinpkg"] = True

    if "--getbinpkgonly" in emerge_config.opts:
        emerge_config.opts["--getbinpkg"] = True

    if "--getbinpkgonly" in emerge_config.opts:
        emerge_config.opts["--usepkgonly"] = True

    if "--getbinpkg" in emerge_config.opts:
        emerge_config.opts["--usepkg"] = True

    if "--usepkgonly" in emerge_config.opts:
        emerge_config.opts["--usepkg"] = True

    if "--buildpkgonly" in emerge_config.opts:
        # --buildpkgonly will not merge anything, so
        # it cancels all binary package options.
        for opt in ("--getbinpkg", "--getbinpkgonly",
                "--usepkg", "--usepkgonly"):
            emerge_config.opts.pop(opt, None)

    adjust_configs(emerge_config.opts, emerge_config.trees)
    apply_priorities(emerge_config.target_config.settings)

    if ("--autounmask-continue" in emerge_config.opts and
            emerge_config.opts.get("--autounmask") == "n"):
        writemsg_level(
            " %s --autounmask-continue has been disabled by --autounmask=n\n" %
            warn("*"), level=logging.WARNING, noiselevel=-1)

    # Reject unsupported binary package formats up front.
    for fmt in emerge_config.target_config.settings.get("PORTAGE_BINPKG_FORMAT", "").split():
        if not fmt in portage.const.SUPPORTED_BINPKG_FORMATS:
            if "--pkg-format" in emerge_config.opts:
                problematic="--pkg-format"
            else:
                problematic="PORTAGE_BINPKG_FORMAT"

            writemsg_level(("emerge: %s is not set correctly. Format " + \
                "'%s' is not supported.\n") % (problematic, fmt),
                level=logging.ERROR, noiselevel=-1)
            return 1

    if emerge_config.action == 'version':
        writemsg_stdout(getportageversion(
            emerge_config.target_config.settings["PORTDIR"],
            None,
            emerge_config.target_config.settings.profile_path,
            emerge_config.target_config.settings.get("CHOST"),
            emerge_config.target_config.trees['vartree'].dbapi) + '\n',
            noiselevel=-1)
        return 0
    elif emerge_config.action == 'help':
        emerge_help()
        return 0

    spinner = stdout_spinner()
    if "candy" in emerge_config.target_config.settings.features:
        spinner.update = spinner.update_scroll

    if "--quiet" not in emerge_config.opts:
        portage.deprecated_profile_check(
            settings=emerge_config.target_config.settings)
        repo_name_check(emerge_config.trees)
        repo_name_duplicate_check(emerge_config.trees)
        config_protect_check(emerge_config.trees)
    check_procfs()

    for mytrees in emerge_config.trees.values():
        mydb = mytrees["porttree"].dbapi
        # Freeze the portdbapi for performance (memoize all xmatch results).
        mydb.freeze()

        if emerge_config.action in ('search', None) and \
                "--usepkg" in emerge_config.opts:
            # Populate the bintree with current --getbinpkg setting.
            # This needs to happen before expand_set_arguments(), in case
            # any sets use the bintree.
            try:
                mytrees["bintree"].populate(
                    getbinpkgs="--getbinpkg" in emerge_config.opts)
            except ParseError as e:
                writemsg("\n\n!!!%s.\nSee make.conf(5) for more info.\n"
                    % e, noiselevel=-1)
                return 1

    del mytrees, mydb

    for x in emerge_config.args:
        if x.endswith((".ebuild", ".tbz2")) and \
                os.path.exists(os.path.abspath(x)):
            print(colorize("BAD", "\n*** emerging by path is broken "
                "and may not always work!!!\n"))
            break

    if emerge_config.action == "list-sets":
        writemsg_stdout("".join("%s\n" % s for s in
            sorted(emerge_config.target_config.sets)))
        return os.EX_OK
    elif emerge_config.action == "check-news":
        news_counts = count_unread_news(
            emerge_config.target_config.trees["porttree"].dbapi,
            emerge_config.target_config.trees["vartree"].dbapi)
        if any(news_counts.values()):
            display_news_notifications(news_counts)
        elif "--quiet" not in emerge_config.opts:
            print("", colorize("GOOD", "*"), "No news items were found.")
        return os.EX_OK

    ensure_required_sets(emerge_config.trees)

    if emerge_config.action is None and \
            "--resume" in emerge_config.opts and emerge_config.args:
        writemsg("emerge: unexpected argument(s) for --resume: %s\n" %
            " ".join(emerge_config.args), noiselevel=-1)
        return 1

    # only expand sets for actions taking package arguments
    oldargs = emerge_config.args[:]
    if emerge_config.action in ("clean", "config", "depclean",
            "info", "prune", "unmerge", "rage-clean", None):
        newargs, retval = expand_set_arguments(
            emerge_config.args, emerge_config.action,
            emerge_config.target_config)
        if retval != os.EX_OK:
            return retval

        # Need to handle empty sets specially, otherwise emerge will react
        # with the help message for empty argument lists
        if oldargs and not newargs:
            print("emerge: no targets left after set expansion")
            return 0

        emerge_config.args = newargs

    if "--tree" in emerge_config.opts and \
            "--columns" in emerge_config.opts:
        print("emerge: can't specify both of \"--tree\" and \"--columns\".")
        return 1

    if '--emptytree' in emerge_config.opts and \
            '--noreplace' in emerge_config.opts:
        writemsg_level("emerge: can't specify both of " + \
            "\"--emptytree\" and \"--noreplace\".\n",
            level=logging.ERROR, noiselevel=-1)
        return 1

    if ("--quiet" in emerge_config.opts):
        spinner.update = spinner.update_quiet
        portage.util.noiselimit = -1

    if "--fetch-all-uri" in emerge_config.opts:
        emerge_config.opts["--fetchonly"] = True

    if "--skipfirst" in emerge_config.opts and \
            "--resume" not in emerge_config.opts:
        emerge_config.opts["--resume"] = True

    # Allow -p to remove --ask
    if "--pretend" in emerge_config.opts:
        emerge_config.opts.pop("--ask", None)

    # forbid --ask when not in a terminal
    # note: this breaks `emerge --ask | tee logfile`, but that doesn't work anyway.
    if ("--ask" in emerge_config.opts) and (not sys.stdin.isatty()):
        portage.writemsg("!!! \"--ask\" should only be used in a terminal. Exiting.\n",
            noiselevel=-1)
        return 1

    if emerge_config.target_config.settings.get("PORTAGE_DEBUG", "") == "1":
        spinner.update = spinner.update_quiet
        portage.util.noiselimit = 0
        if "python-trace" in emerge_config.target_config.settings.features:
            portage.debug.set_trace(True)

    if not ("--quiet" in emerge_config.opts):
        if '--nospinner' in emerge_config.opts or \
                emerge_config.target_config.settings.get('TERM') == 'dumb' or \
                not sys.stdout.isatty():
            spinner.update = spinner.update_basic

    if "--debug" in emerge_config.opts:
        print("myaction", emerge_config.action)
        print("myopts", emerge_config.opts)

    if not emerge_config.action and not emerge_config.args and \
            "--resume" not in emerge_config.opts:
        emerge_help()
        return 1

    pretend = "--pretend" in emerge_config.opts
    fetchonly = "--fetchonly" in emerge_config.opts or \
        "--fetch-all-uri" in emerge_config.opts
    buildpkgonly = "--buildpkgonly" in emerge_config.opts

    # check if root user is the current user for the actions where emerge needs this
    if portage.data.secpass < 2:
        # We've already allowed "--version" and "--help" above.
        if "--pretend" not in emerge_config.opts and \
                emerge_config.action not in ("search", "info"):
            need_superuser = emerge_config.action in ('clean', 'depclean',
                'deselect', 'prune', 'unmerge', "rage-clean") or not \
                (fetchonly or \
                (buildpkgonly and portage.data.secpass >= 1) or \
                emerge_config.action in ("metadata", "regen", "sync"))
            if portage.data.secpass < 1 or \
                    need_superuser:
                if need_superuser:
                    access_desc = "superuser"
                else:
                    access_desc = "portage group"
                # Always show portage_group_warning() when only portage group
                # access is required but the user is not in the portage group.
                if "--ask" in emerge_config.opts:
                    writemsg_stdout("This action requires %s access...\n" % \
                        (access_desc,), noiselevel=-1)
                    if portage.data.secpass < 1 and not need_superuser:
                        portage.data.portage_group_warning()
                    uq = UserQuery(emerge_config.opts)
                    if uq.query("Would you like to add --pretend to options?",
                            "--ask-enter-invalid" in emerge_config.opts) == "No":
                        return 128 + signal.SIGINT
                    emerge_config.opts["--pretend"] = True
                    emerge_config.opts.pop("--ask")
                else:
                    sys.stderr.write(("emerge: %s access is required\n") \
                        % access_desc)
                    if portage.data.secpass < 1 and not need_superuser:
                        portage.data.portage_group_warning()
                    return 1

    # Disable emergelog for everything except build or unmerge operations.
    # This helps minimize parallel emerge.log entries that can confuse log
    # parsers like genlop.
    disable_emergelog = False
    for x in ("--pretend", "--fetchonly", "--fetch-all-uri"):
        if x in emerge_config.opts:
            disable_emergelog = True
            break
    if disable_emergelog:
        pass
    elif emerge_config.action in ("search", "info"):
        disable_emergelog = True
    elif portage.data.secpass < 1:
        disable_emergelog = True

    import _emerge.emergelog
    _emerge.emergelog._disable = disable_emergelog

    if not disable_emergelog:
        emerge_log_dir = \
            emerge_config.target_config.settings.get('EMERGE_LOG_DIR')
        if emerge_log_dir:
            try:
                # At least the parent needs to exist for the lock file.
                portage.util.ensure_dirs(emerge_log_dir)
            except portage.exception.PortageException as e:
                writemsg_level("!!! Error creating directory for " + \
                    "EMERGE_LOG_DIR='%s':\n!!! %s\n" % \
                    (emerge_log_dir, e),
                    noiselevel=-1, level=logging.ERROR)
                portage.util.ensure_dirs(_emerge.emergelog._emerge_log_dir)
            else:
                _emerge.emergelog._emerge_log_dir = emerge_log_dir
        else:
            _emerge.emergelog._emerge_log_dir = os.path.join(os.sep,
                portage.const.EPREFIX.lstrip(os.sep), "var", "log")
            portage.util.ensure_dirs(_emerge.emergelog._emerge_log_dir)

    # Log the invocation (options, action, original arguments) unless pretending.
    if not "--pretend" in emerge_config.opts:
        time_fmt = "%b %d, %Y %H:%M:%S"
        if sys.hexversion < 0x3000000:
            time_fmt = portage._unicode_encode(time_fmt)
        time_str = time.strftime(time_fmt, time.localtime(time.time()))
        # Avoid potential UnicodeDecodeError in Python 2, since strftime
        # returns bytes in Python 2, and %b may contain non-ascii chars.
        time_str = _unicode_decode(time_str,
            encoding=_encodings['content'], errors='replace')
        emergelog(xterm_titles, "Started emerge on: %s" % time_str)
        myelogstr=""
        if emerge_config.opts:
            opt_list = []
            for opt, arg in emerge_config.opts.items():
                if arg is True:
                    opt_list.append(opt)
                elif isinstance(arg, list):
                    # arguments like --exclude that use 'append' action
                    for x in arg:
                        opt_list.append("%s=%s" % (opt, x))
                else:
                    opt_list.append("%s=%s" % (opt, arg))
            myelogstr=" ".join(opt_list)
        if emerge_config.action:
            myelogstr += " --" + emerge_config.action
        if oldargs:
            myelogstr += " " + " ".join(oldargs)
        emergelog(xterm_titles, " *** emerge " + myelogstr)

    oldargs = None

    # Install signal/exit handlers so terminations are logged.
    def emergeexitsig(signum, frame):
        signal.signal(signal.SIGTERM, signal.SIG_IGN)
        portage.util.writemsg(
            "\n\nExiting on signal %(signal)s\n" % {"signal":signum})
        sys.exit(128 + signum)
    signal.signal(signal.SIGTERM, emergeexitsig)

    def emergeexit():
        """This gets out final log message in before we quit."""
        if "--pretend" not in emerge_config.opts:
            emergelog(xterm_titles, " *** terminating.")
        if xterm_titles:
            xtermTitleReset()
    portage.atexit_register(emergeexit)

    # Dispatch on the requested action; the trailing else handles the
    # default build ("update", "system", or plain package arguments).
    if emerge_config.action in ("config", "metadata", "regen", "sync"):
        if "--pretend" in emerge_config.opts:
            sys.stderr.write(("emerge: The '%s' action does " + \
                "not support '--pretend'.\n") % emerge_config.action)
            return 1

    if "sync" == emerge_config.action:
        return action_sync(emerge_config)
    elif "metadata" == emerge_config.action:
        action_metadata(emerge_config.target_config.settings,
            emerge_config.target_config.trees['porttree'].dbapi,
            emerge_config.opts)
    elif emerge_config.action=="regen":
        validate_ebuild_environment(emerge_config.trees)
        return action_regen(emerge_config.target_config.settings,
            emerge_config.target_config.trees['porttree'].dbapi,
            emerge_config.opts.get("--jobs"),
            emerge_config.opts.get("--load-average"))
    # HELP action
    elif "config" == emerge_config.action:
        validate_ebuild_environment(emerge_config.trees)
        action_config(emerge_config.target_config.settings,
            emerge_config.trees, emerge_config.opts, emerge_config.args)
    # SEARCH action
    elif "search" == emerge_config.action:
        validate_ebuild_environment(emerge_config.trees)
        action_search(emerge_config.target_config,
            emerge_config.opts, emerge_config.args, spinner)
    elif emerge_config.action in \
            ('clean', 'depclean', 'deselect', 'prune', 'unmerge', 'rage-clean'):
        validate_ebuild_environment(emerge_config.trees)
        rval = action_uninstall(emerge_config.target_config.settings,
            emerge_config.trees, emerge_config.target_config.mtimedb["ldpath"],
            emerge_config.opts, emerge_config.action,
            emerge_config.args, spinner)
        if not (emerge_config.action == 'deselect' or
                buildpkgonly or fetchonly or pretend):
            post_emerge(emerge_config.action, emerge_config.opts,
                emerge_config.args, emerge_config.target_config.root,
                emerge_config.trees, emerge_config.target_config.mtimedb, rval)
        return rval
    elif emerge_config.action == 'info':
        # Ensure atoms are valid before calling unmerge().
        vardb = emerge_config.target_config.trees['vartree'].dbapi
        portdb = emerge_config.target_config.trees['porttree'].dbapi
        bindb = emerge_config.target_config.trees['bintree'].dbapi
        valid_atoms = []
        for x in emerge_config.args:
            if is_valid_package_atom(x, allow_repo=True):
                try:
                    #look at the installed files first, if there is no match
                    #look at the ebuilds, since EAPI 4 allows running pkg_info
                    #on non-installed packages
                    valid_atom = dep_expand(x, mydb=vardb)
                    if valid_atom.cp.split("/")[0] == "null":
                        valid_atom = dep_expand(x, mydb=portdb)
                    if valid_atom.cp.split("/")[0] == "null" and \
                            "--usepkg" in emerge_config.opts:
                        valid_atom = dep_expand(x, mydb=bindb)
                    valid_atoms.append(valid_atom)
                except portage.exception.AmbiguousPackageName as e:
                    msg = "The short ebuild name \"" + x + \
                        "\" is ambiguous. Please specify " + \
                        "one of the following " + \
                        "fully-qualified ebuild names instead:"
                    for line in textwrap.wrap(msg, 70):
                        writemsg_level("!!! %s\n" % (line,),
                            level=logging.ERROR, noiselevel=-1)
                    for i in e.args[0]:
                        writemsg_level(" %s\n" % colorize("INFORM", i),
                            level=logging.ERROR, noiselevel=-1)
                    writemsg_level("\n", level=logging.ERROR, noiselevel=-1)
                    return 1
                continue
            msg = []
            msg.append("'%s' is not a valid package atom." % (x,))
            msg.append("Please check ebuild(5) for full details.")
            writemsg_level("".join("!!! %s\n" % line for line in msg),
                level=logging.ERROR, noiselevel=-1)
            return 1
        return action_info(emerge_config.target_config.settings,
            emerge_config.trees, emerge_config.opts, valid_atoms)

    # "update", "system", or just process files:
    else:
        validate_ebuild_environment(emerge_config.trees)

        # Reject arguments that are neither sets, atoms, nor existing paths.
        for x in emerge_config.args:
            if x.startswith(SETPREFIX) or \
                    is_valid_package_atom(x, allow_repo=True):
                continue
            if x[:1] == os.sep:
                continue
            try:
                os.lstat(x)
                continue
            except OSError:
                pass
            msg = []
            msg.append("'%s' is not a valid package atom." % (x,))
            msg.append("Please check ebuild(5) for full details.")
            writemsg_level("".join("!!! %s\n" % line for line in msg),
                level=logging.ERROR, noiselevel=-1)
            return 1

        # GLEP 42 says to display news *after* an emerge --pretend
        if "--pretend" not in emerge_config.opts:
            uq = UserQuery(emerge_config.opts)
            if display_news_notification(emerge_config.target_config,
                    emerge_config.opts) \
                    and "--ask" in emerge_config.opts \
                    and "--read-news" in emerge_config.opts \
                    and uq.query("Would you like to read the news items while " \
                    "calculating dependencies?",
                    '--ask-enter-invalid' in emerge_config.opts) == "Yes":
                try:
                    subprocess.call(['eselect', 'news', 'read'])
                # If eselect is not installed, Python <3.3 will throw an
                # OSError. >=3.3 will throw a FileNotFoundError, which is a
                # subclass of OSError.
                except OSError:
                    writemsg("Please install eselect to use this feature.\n",
                        noiselevel=-1)
        retval = action_build(emerge_config, spinner=spinner)
        post_emerge(emerge_config.action, emerge_config.opts,
            emerge_config.args, emerge_config.target_config.root,
            emerge_config.trees, emerge_config.target_config.mtimedb, retval)

        return retval
| gpl-2.0 |
kawamon/hue | desktop/core/ext-py/docutils-0.14/docutils/languages/cs.py | 148 | 1928 | # $Id: cs.py 4564 2006-05-21 20:44:42Z wiemann $
# Author: Marek Blaha <mb@dat.cz>
# Copyright: This module has been placed in the public domain.
# New language mappings are welcome. Before doing a new translation, please
# read <http://docutils.sf.net/docs/howto/i18n.html>. Two files must be
# translated for each language: one in docutils/languages, the other in
# docutils/parsers/rst/languages.
"""
Czech-language mappings for language-dependent features of Docutils.
"""
__docformat__ = 'reStructuredText'


# Czech translations for the fixed, language-dependent node labels.
labels = {
    # fixed: language-dependent
    'author': u'Autor',
    'authors': u'Auto\u0159i',
    'organization': u'Organizace',
    'address': u'Adresa',
    'contact': u'Kontakt',
    'version': u'Verze',
    'revision': u'Revize',
    'status': u'Stav',
    'date': u'Datum',
    'copyright': u'Copyright',
    'dedication': u'V\u011Bnov\u00E1n\u00ED',
    'abstract': u'Abstrakt',
    'attention': u'Pozor!',
    'caution': u'Opatrn\u011B!',
    'danger': u'!NEBEZPE\u010C\u00CD!',
    'error': u'Chyba',
    'hint': u'Rada',
    'important': u'D\u016Fle\u017Eit\u00E9',
    'note': u'Pozn\u00E1mka',
    'tip': u'Tip',
    'warning': u'Varov\u00E1n\u00ED',
    'contents': u'Obsah'}
"""Mapping of node class name to label text."""

# Reverse direction: lowercased Czech field names back to canonical names.
bibliographic_fields = {
    # language-dependent: fixed
    u'autor': 'author',
    u'auto\u0159i': 'authors',
    u'organizace': 'organization',
    u'adresa': 'address',
    u'kontakt': 'contact',
    u'verze': 'version',
    u'revize': 'revision',
    u'stav': 'status',
    u'datum': 'date',
    u'copyright': 'copyright',
    u'v\u011Bnov\u00E1n\u00ED': 'dedication',
    u'abstrakt': 'abstract'}
"""Czech (lowcased) to canonical name mapping for bibliographic fields."""

author_separators = [';', ',']
"""List of separator strings for the 'Authors' bibliographic field. Tried in
order."""
| apache-2.0 |
cloudera/hue | desktop/core/ext-py/Django-1.11.29/django/contrib/auth/management/commands/changepassword.py | 51 | 2677 | from __future__ import unicode_literals
import getpass
from django.contrib.auth import get_user_model
from django.contrib.auth.password_validation import validate_password
from django.core.exceptions import ValidationError
from django.core.management.base import BaseCommand, CommandError
from django.db import DEFAULT_DB_ALIAS
from django.utils.encoding import force_str
UserModel = get_user_model()
class Command(BaseCommand):
    """Management command that interactively changes a user's password."""
    help = "Change a user's password for django.contrib.auth."
    requires_migrations_checks = True
    requires_system_checks = False

    def _get_pass(self, prompt="Password: "):
        # Prompt without echoing; an empty response aborts the command.
        p = getpass.getpass(prompt=force_str(prompt))
        if not p:
            raise CommandError("aborted")
        return p

    def add_arguments(self, parser):
        parser.add_argument(
            'username', nargs='?',
            help='Username to change password for; by default, it\'s the current username.',
        )
        parser.add_argument(
            '--database', action='store', dest='database',
            default=DEFAULT_DB_ALIAS,
            help='Specifies the database to use. Default is "default".',
        )

    def handle(self, *args, **options):
        # Fall back to the OS-level login name when no username was given.
        if options['username']:
            username = options['username']
        else:
            username = getpass.getuser()

        try:
            u = UserModel._default_manager.using(options['database']).get(**{
                UserModel.USERNAME_FIELD: username
            })
        except UserModel.DoesNotExist:
            raise CommandError("user '%s' does not exist" % username)

        self.stdout.write("Changing password for user '%s'\n" % u)

        # Loop until the two entries match AND pass the validators, or the
        # user exhausts MAX_TRIES attempts.
        MAX_TRIES = 3
        count = 0
        p1, p2 = 1, 2  # To make them initially mismatch.
        password_validated = False
        while (p1 != p2 or not password_validated) and count < MAX_TRIES:
            p1 = self._get_pass()
            p2 = self._get_pass("Password (again): ")
            if p1 != p2:
                self.stdout.write("Passwords do not match. Please try again.\n")
                count += 1
                # Don't validate passwords that don't match.
                continue
            try:
                validate_password(p2, u)
            except ValidationError as err:
                self.stderr.write('\n'.join(err.messages))
                count += 1
            else:
                password_validated = True

        if count == MAX_TRIES:
            raise CommandError("Aborting password change for user '%s' after %s attempts" % (u, count))
        u.set_password(p1)
        u.save()

        return "Password changed successfully for user '%s'" % u
| apache-2.0 |
cpennington/edx-platform | openedx/core/djangoapps/verified_track_content/views.py | 4 | 1417 | """
View methods for verified track content.
"""
from django.contrib.auth.decorators import login_required
from opaque_keys.edx.keys import CourseKey
from lms.djangoapps.courseware.courses import get_course_with_access
from openedx.core.djangoapps.verified_track_content.models import VerifiedTrackCohortedCourse
from util.json_request import JsonResponse, expect_json
@expect_json
@login_required
def cohorting_settings(request, course_key_string):
    """
    The handler for verified track cohorting requests.

    This will raise 404 if user is not staff.

    Returns a JSON representation of whether or not the course has verified track cohorting enabled.
    The "verified_cohort_name" field will only be present if "enabled" is True.

    Example:
    >>> example = {
    >>>     "enabled": True,
    >>>     "verified_cohort_name" : "Micromasters"
    >>> }
    """
    course_key = CourseKey.from_string(course_key_string)
    # Raises 404 unless the requesting user has staff access to the course.
    get_course_with_access(request.user, 'staff', course_key)

    # NOTE: this local `settings` is just the response payload dict; it
    # shadows the usual `django.conf.settings` naming convention.
    settings = {}
    verified_track_cohort_enabled = VerifiedTrackCohortedCourse.is_verified_track_cohort_enabled(course_key)
    settings['enabled'] = verified_track_cohort_enabled
    if verified_track_cohort_enabled:
        # Only expose the cohort name when the feature is enabled.
        settings['verified_cohort_name'] = VerifiedTrackCohortedCourse.verified_cohort_name_for_course(course_key)

    return JsonResponse(settings)
| agpl-3.0 |
wwj718/murp-edx | lms/djangoapps/instructor/views/legacy.py | 2 | 82681 | """
Instructor Views
"""
## NOTE: This is the code for the legacy instructor dashboard
## We are no longer supporting this file or accepting changes into it.
from contextlib import contextmanager
import csv
import json
import logging
import os
import re
import requests
from collections import defaultdict, OrderedDict
from markupsafe import escape
from requests.status_codes import codes
from StringIO import StringIO
from django.conf import settings
from django.contrib.auth.models import User
from django.http import HttpResponse
from django_future.csrf import ensure_csrf_cookie
from django.views.decorators.cache import cache_control
from django.core.urlresolvers import reverse
from django.core.mail import send_mail
from django.utils import timezone
from xmodule_modifiers import wrap_xblock
import xmodule.graders as xmgraders
from xmodule.modulestore import ModuleStoreEnum
from xmodule.modulestore.django import modulestore
from opaque_keys.edx.locations import SlashSeparatedCourseKey
from xmodule.modulestore.exceptions import ItemNotFoundError
from xmodule.html_module import HtmlDescriptor
from opaque_keys import InvalidKeyError
from lms.lib.xblock.runtime import quote_slashes
from submissions import api as sub_api # installed from the edx-submissions repository
from bulk_email.models import CourseEmail, CourseAuthorization
from courseware import grades
from courseware.access import has_access
from courseware.courses import get_course_with_access, get_cms_course_link
from student.roles import (
CourseStaffRole, CourseInstructorRole, CourseBetaTesterRole, GlobalStaff
)
from courseware.models import StudentModule
from django_comment_common.models import (
Role, FORUM_ROLE_ADMINISTRATOR, FORUM_ROLE_MODERATOR, FORUM_ROLE_COMMUNITY_TA
)
from django_comment_client.utils import has_forum_access
from instructor.offline_gradecalc import student_grades, offline_grades_available
from instructor.views.tools import strip_if_string, bulk_email_is_enabled_for_course
from instructor_task.api import (
get_running_instructor_tasks,
get_instructor_task_history,
submit_rescore_problem_for_all_students,
submit_rescore_problem_for_student,
submit_reset_problem_attempts_for_all_students,
submit_bulk_course_email
)
from instructor_task.views import get_task_completion_info
from edxmako.shortcuts import render_to_response, render_to_string
from class_dashboard import dashboard_data
from psychometrics import psychoanalyze
from student.models import (
CourseEnrollment,
CourseEnrollmentAllowed,
unique_id_for_user,
anonymous_id_for_user
)
import track.views
from xblock.field_data import DictFieldData
from xblock.fields import ScopeIds
from django.utils.translation import ugettext as _
from microsite_configuration import microsite
from opaque_keys.edx.locations import i4xEncoder
# Module-level logger for this views module.
log = logging.getLogger(__name__)

# internal commands for managing forum roles:
FORUM_ROLE_ADD = 'add'
FORUM_ROLE_REMOVE = 'remove'

# For determining if a shibboleth course (enrollment domain begins with this prefix)
SHIBBOLETH_DOMAIN_PREFIX = 'shib:'
def split_by_comma_and_whitespace(a_str):
    """
    Split ``a_str`` at every comma or whitespace character.

    NOTE: each individual separator produces a field, so consecutive
    separators (e.g. ", ") yield empty strings in the result.
    """
    separator_pattern = r'[\s,]'
    return re.split(separator_pattern, a_str)
@ensure_csrf_cookie
@cache_control(no_cache=True, no_store=True, must_revalidate=True)
def instructor_dashboard(request, course_id):
"""Display the instructor dashboard for a course."""
course_key = SlashSeparatedCourseKey.from_deprecated_string(course_id)
course = get_course_with_access(request.user, 'staff', course_key, depth=None)
instructor_access = has_access(request.user, 'instructor', course) # an instructor can manage staff lists
forum_admin_access = has_forum_access(request.user, course_key, FORUM_ROLE_ADMINISTRATOR)
msg = ''
email_msg = ''
email_to_option = None
email_subject = None
html_message = ''
show_email_tab = False
problems = []
plots = []
datatable = {}
# the instructor dashboard page is modal: grades, psychometrics, admin
# keep that state in request.session (defaults to grades mode)
idash_mode = request.POST.get('idash_mode', '')
idash_mode_key = u'idash_mode:{0}'.format(course_id)
if idash_mode:
request.session[idash_mode_key] = idash_mode
else:
idash_mode = request.session.get(idash_mode_key, 'Grades')
enrollment_number = CourseEnrollment.num_enrolled_in(course_key)
# assemble some course statistics for output to instructor
def get_course_stats_table():
datatable = {
'header': ['Statistic', 'Value'],
'title': _('Course Statistics At A Glance'),
}
data = [['# Enrolled', enrollment_number]]
data += [['Date', timezone.now().isoformat()]]
data += compute_course_stats(course).items()
if request.user.is_staff:
for field in course.fields.values():
if getattr(field.scope, 'user', False):
continue
data.append([
field.name,
json.dumps(field.read_json(course), cls=i4xEncoder)
])
datatable['data'] = data
return datatable
def return_csv(func, datatable, file_pointer=None):
"""Outputs a CSV file from the contents of a datatable."""
if file_pointer is None:
response = HttpResponse(mimetype='text/csv')
response['Content-Disposition'] = 'attachment; filename={0}'.format(func)
else:
response = file_pointer
writer = csv.writer(response, dialect='excel', quotechar='"', quoting=csv.QUOTE_ALL)
encoded_row = [unicode(s).encode('utf-8') for s in datatable['header']]
writer.writerow(encoded_row)
for datarow in datatable['data']:
# 's' here may be an integer, float (eg score) or string (eg student name)
encoded_row = [
# If s is already a UTF-8 string, trying to make a unicode
# object out of it will fail unless we pass in an encoding to
# the constructor. But we can't do that across the board,
# because s is often a numeric type. So just do this.
s if isinstance(s, str) else unicode(s).encode('utf-8')
for s in datarow
]
writer.writerow(encoded_row)
return response
def get_student_from_identifier(unique_student_identifier):
    """Resolve a student User from an email address or username.

    Returns ``(msg, student)``: ``student`` is None when no user matches,
    and ``msg`` is an HTML status fragment describing the outcome.
    """
    identifier = strip_if_string(unique_student_identifier)
    # '@' in the identifier means it's an email address, else a username.
    lookup = {'email': identifier} if "@" in identifier else {'username': identifier}
    try:
        student = User.objects.get(**lookup)
        msg = _("Found a single student. ")
    except User.DoesNotExist:
        student = None
        msg = "<font color='red'>{text}</font>".format(
            text=_("Couldn't find student with that email or username.")
        )
    return msg, student
# process actions from form POST
action = request.POST.get('action', '')
use_offline = request.POST.get('use_offline_grades', False)

# Manual git reload: pull the course repo and/or re-import it into the modulestore.
if settings.FEATURES['ENABLE_MANUAL_GIT_RELOAD']:
    if 'GIT pull' in action:
        data_dir = course.data_dir
        log.debug('git pull {0}'.format(data_dir))
        gdir = settings.DATA_DIR / data_dir
        if not os.path.exists(gdir):
            msg += "====> ERROR in gitreload - no such directory {0}".format(gdir)
        else:
            # NOTE(review): shells out via os.popen; data_dir comes from course
            # metadata rather than user input, but confirm it can never contain
            # shell metacharacters.
            cmd = "cd {0}; git reset --hard HEAD; git clean -f -d; git pull origin; chmod g+w course.xml".format(gdir)
            msg += "git pull on {0}:<p>".format(data_dir)
            msg += "<pre>{0}</pre></p>".format(escape(os.popen(cmd).read()))
            track.views.server_track(request, "git-pull", {"directory": data_dir}, page="idashboard")
    if 'Reload course' in action:
        log.debug('reloading {0} ({1})'.format(course_key, course))
        try:
            data_dir = course.data_dir
            modulestore().try_load_course(data_dir)
            msg += "<br/><p>Course reloaded from {0}</p>".format(data_dir)
            track.views.server_track(request, "reload", {"directory": data_dir}, page="idashboard")
            course_errors = modulestore().get_course_errors(course.id)
            msg += '<ul>'
            for cmsg, cerr in course_errors:
                msg += "<li>{0}: <pre>{1}</pre>".format(cmsg, escape(cerr))
            msg += '</ul>'
        except Exception as err:  # pylint: disable=broad-except
            msg += '<br/><p>Error: {0}</p>'.format(escape(err))
# --- Grade listing / dumping actions (start of the big action dispatch chain) ---
if action == 'Dump list of enrolled students' or action == 'List enrolled students':
    log.debug(action)
    datatable = get_student_grade_summary_data(request, course, get_grades=False, use_offline=use_offline)
    datatable['title'] = _('List of students enrolled in {course_key}').format(course_key=course_key.to_deprecated_string())
    track.views.server_track(request, "list-students", {}, page="idashboard")

elif 'Dump Grades' in action:
    log.debug(action)
    datatable = get_student_grade_summary_data(request, course, get_grades=True, use_offline=use_offline)
    datatable['title'] = _('Summary Grades of students enrolled in {course_key}').format(course_key=course_key.to_deprecated_string())
    track.views.server_track(request, "dump-grades", {}, page="idashboard")

elif 'Dump all RAW grades' in action:
    log.debug(action)
    datatable = get_student_grade_summary_data(request, course, get_grades=True,
                                               get_raw_scores=True, use_offline=use_offline)
    # NOTE(review): this title interpolates course_key directly while the
    # branches above use to_deprecated_string() — likely an inconsistency.
    datatable['title'] = _('Raw Grades of students enrolled in {course_key}').format(course_key=course_key)
    track.views.server_track(request, "dump-grades-raw", {}, page="idashboard")

elif 'Download CSV of all student grades' in action:
    track.views.server_track(request, "dump-grades-csv", {}, page="idashboard")
    return return_csv('grades_{0}.csv'.format(course_key.to_deprecated_string()),
                      get_student_grade_summary_data(request, course, use_offline=use_offline))

elif 'Download CSV of all RAW grades' in action:
    track.views.server_track(request, "dump-grades-csv-raw", {}, page="idashboard")
    return return_csv('grades_{0}_raw.csv'.format(course_key.to_deprecated_string()),
                      get_student_grade_summary_data(request, course, get_raw_scores=True, use_offline=use_offline))

elif 'Download CSV of answer distributions' in action:
    track.views.server_track(request, "dump-answer-dist-csv", {}, page="idashboard")
    return return_csv('answer_dist_{0}.csv'.format(course_key.to_deprecated_string()), get_answers_distribution(request, course_key))

elif 'Dump description of graded assignments configuration' in action:
    # what is "graded assignments configuration"?
    track.views.server_track(request, "dump-graded-assignments-config", {}, page="idashboard")
    msg += dump_grading_context(course)
# --- Course-wide background tasks: rescore / reset attempts for ALL students ---
elif "Rescore ALL students' problem submissions" in action:
    problem_location_str = strip_if_string(request.POST.get('problem_for_all_students', ''))
    try:
        problem_location = course_key.make_usage_key_from_deprecated_string(problem_location_str)
        instructor_task = submit_rescore_problem_for_all_students(request, problem_location)
        if instructor_task is None:
            msg += '<font color="red">{text}</font>'.format(
                text=_('Failed to create a background task for rescoring "{problem_url}".').format(
                    problem_url=problem_location_str
                )
            )
        else:
            track.views.server_track(
                request,
                "rescore-all-submissions",
                {
                    "problem": problem_location_str,
                    "course": course_key.to_deprecated_string()
                },
                page="idashboard"
            )
    except (InvalidKeyError, ItemNotFoundError) as err:
        msg += '<font color="red">{text}</font>'.format(
            text=_('Failed to create a background task for rescoring "{problem_url}": problem not found.').format(
                problem_url=problem_location_str
            )
        )
    except Exception as err:  # pylint: disable=broad-except
        log.error("Encountered exception from rescore: {0}".format(err))
        msg += '<font color="red">{text}</font>'.format(
            text=_('Failed to create a background task for rescoring "{url}": {message}.').format(
                url=problem_location_str, message=err.message
            )
        )

elif "Reset ALL students' attempts" in action:
    problem_location_str = strip_if_string(request.POST.get('problem_for_all_students', ''))
    try:
        problem_location = course_key.make_usage_key_from_deprecated_string(problem_location_str)
        instructor_task = submit_reset_problem_attempts_for_all_students(request, problem_location)
        if instructor_task is None:
            msg += '<font color="red">{text}</font>'.format(
                text=_('Failed to create a background task for resetting "{problem_url}".').format(problem_url=problem_location_str)
            )
        else:
            track.views.server_track(
                request,
                "reset-all-attempts",
                {
                    "problem": problem_location_str,
                    "course": course_key.to_deprecated_string()
                },
                page="idashboard"
            )
    except (InvalidKeyError, ItemNotFoundError) as err:
        log.error('Failure to reset: unknown problem "{0}"'.format(err))
        msg += '<font color="red">{text}</font>'.format(
            text=_('Failed to create a background task for resetting "{problem_url}": problem not found.').format(
                problem_url=problem_location_str
            )
        )
    except Exception as err:  # pylint: disable=broad-except
        log.error("Encountered exception from reset: {0}".format(err))
        msg += '<font color="red">{text}</font>'.format(
            text=_('Failed to create a background task for resetting "{url}": {message}.').format(
                url=problem_location_str, message=err.message
            )
        )
# --- Background task history (per-student variant must be checked first) ---
elif "Show Background Task History for Student" in action:
    # put this before the non-student case, since the use of "in" will cause this to be missed
    unique_student_identifier = request.POST.get('unique_student_identifier', '')
    message, student = get_student_from_identifier(unique_student_identifier)
    if student is None:
        msg += message
    else:
        problem_location_str = strip_if_string(request.POST.get('problem_for_student', ''))
        try:
            problem_location = course_key.make_usage_key_from_deprecated_string(problem_location_str)
        except InvalidKeyError:
            msg += '<font color="red">{text}</font>'.format(
                text=_('Could not find problem location "{url}".').format(
                    url=problem_location_str
                )
            )
        else:
            message, datatable = get_background_task_table(course_key, problem_location, student)
            msg += message

elif "Show Background Task History" in action:
    problem_location_str = strip_if_string(request.POST.get('problem_for_all_students', ''))
    try:
        problem_location = course_key.make_usage_key_from_deprecated_string(problem_location_str)
    except InvalidKeyError:
        msg += '<font color="red">{text}</font>'.format(
            text=_('Could not find problem location "{url}".').format(
                url=problem_location_str
            )
        )
    else:
        message, datatable = get_background_task_table(course_key, problem_location)
        msg += message
# --- Per-student module actions: reset attempts / delete state / rescore one submission ---
elif ("Reset student's attempts" in action or
        "Delete student state for module" in action or
        "Rescore student's problem submission" in action):
    # get the form data
    unique_student_identifier = request.POST.get(
        'unique_student_identifier', ''
    )
    problem_location_str = strip_if_string(request.POST.get('problem_for_student', ''))
    try:
        module_state_key = course_key.make_usage_key_from_deprecated_string(problem_location_str)
    except InvalidKeyError:
        msg += '<font color="red">{text}</font>'.format(
            text=_('Could not find problem location "{url}".').format(
                url=problem_location_str
            )
        )
    else:
        # try to uniquely id student by email address or username
        message, student = get_student_from_identifier(unique_student_identifier)
        msg += message
        student_module = None
        if student is not None:
            # Reset the student's score in the submissions API
            # Currently this is used only by open assessment (ORA 2)
            # We need to do this *before* retrieving the `StudentModule` model,
            # because it's possible for a score to exist even if no student module exists.
            if "Delete student state for module" in action:
                try:
                    sub_api.reset_score(
                        anonymous_id_for_user(student, course_key),
                        course_key.to_deprecated_string(),
                        module_state_key.to_deprecated_string(),
                    )
                except sub_api.SubmissionError:
                    # Trust the submissions API to log the error
                    error_msg = _("An error occurred while deleting the score.")
                    msg += "<font color='red'>{err}</font> ".format(err=error_msg)
            # find the module in question
            try:
                student_module = StudentModule.objects.get(
                    student_id=student.id,
                    course_id=course_key,
                    module_state_key=module_state_key
                )
                msg += _("Found module. ")
            except StudentModule.DoesNotExist as err:
                error_msg = _("Couldn't find module with that urlname: {url}. ").format(url=problem_location_str)
                msg += "<font color='red'>{err_msg} ({err})</font>".format(err_msg=error_msg, err=err)
                log.debug(error_msg)
        # Only proceed when both the student and their StudentModule were found.
        if student_module is not None:
            if "Delete student state for module" in action:
                # delete the state
                try:
                    student_module.delete()
                    msg += "<font color='red'>{text}</font>".format(
                        text=_("Deleted student module state for {state}!").format(state=module_state_key)
                    )
                    event = {
                        "problem": problem_location_str,
                        "student": unique_student_identifier,
                        "course": course_key.to_deprecated_string()
                    }
                    track.views.server_track(
                        request,
                        "delete-student-module-state",
                        event,
                        page="idashboard"
                    )
                except Exception as err:  # pylint: disable=broad-except
                    error_msg = _("Failed to delete module state for {id}/{url}. ").format(
                        id=unique_student_identifier, url=problem_location_str
                    )
                    msg += "<font color='red'>{err_msg} ({err})</font>".format(err_msg=error_msg, err=err)
                    log.exception(error_msg)
            elif "Reset student's attempts" in action:
                # modify the problem's state
                try:
                    # load the state json
                    problem_state = json.loads(student_module.state)
                    old_number_of_attempts = problem_state["attempts"]
                    problem_state["attempts"] = 0
                    # save
                    student_module.state = json.dumps(problem_state)
                    student_module.save()
                    event = {
                        "old_attempts": old_number_of_attempts,
                        "student": unicode(student),
                        "problem": student_module.module_state_key,
                        "instructor": unicode(request.user),
                        "course": course_key.to_deprecated_string()
                    }
                    track.views.server_track(request, "reset-student-attempts", event, page="idashboard")
                    msg += "<font color='green'>{text}</font>".format(
                        text=_("Module state successfully reset!")
                    )
                except Exception as err:  # pylint: disable=broad-except
                    error_msg = _("Couldn't reset module state for {id}/{url}. ").format(
                        id=unique_student_identifier, url=problem_location_str
                    )
                    msg += "<font color='red'>{err_msg} ({err})</font>".format(err_msg=error_msg, err=err)
                    log.exception(error_msg)
            else:
                # "Rescore student's problem submission" case
                try:
                    instructor_task = submit_rescore_problem_for_student(request, module_state_key, student)
                    if instructor_task is None:
                        msg += '<font color="red">{text}</font>'.format(
                            text=_('Failed to create a background task for rescoring "{key}" for student {id}.').format(
                                key=module_state_key, id=unique_student_identifier
                            )
                        )
                    else:
                        track.views.server_track(
                            request,
                            "rescore-student-submission",
                            {
                                "problem": module_state_key,
                                "student": unique_student_identifier,
                                "course": course_key.to_deprecated_string()
                            },
                            page="idashboard"
                        )
                except Exception as err:  # pylint: disable=broad-except
                    msg += '<font color="red">{text}</font>'.format(
                        text=_('Failed to create a background task for rescoring "{key}": {id}.').format(
                            key=module_state_key, id=err.message
                        )
                    )
                    log.exception("Encountered exception from rescore: student '{0}' problem '{1}'".format(
                        unique_student_identifier, module_state_key
                    )
                    )
# --- Build a link to an individual student's progress page ---
elif "Get link to student's progress page" in action:
    unique_student_identifier = request.POST.get('unique_student_identifier', '')
    # try to uniquely id student by email address or username
    message, student = get_student_from_identifier(unique_student_identifier)
    msg += message
    if student is not None:
        progress_url = reverse('student_progress', kwargs={
            'course_id': course_key.to_deprecated_string(),
            'student_id': student.id
        })
        track.views.server_track(
            request,
            "get-student-progress-page",
            {
                "student": unicode(student),
                "instructor": unicode(request.user),
                "course": course_key.to_deprecated_string()
            },
            page="idashboard"
        )
        msg += "<a href='{url}' target='_blank'>{text}</a>.".format(
            url=progress_url,
            text=_("Progress page for username: {username} with email address: {email}").format(
                username=student.username, email=student.email
            )
        )
#----------------------------------------
# export grades to remote gradebook
elif action == 'List assignments available in remote gradebook':
    msg2, datatable = _do_remote_gradebook(request.user, course, 'get-assignments')
    msg += msg2

elif action == 'List assignments available for this course':
    log.debug(action)
    allgrades = get_student_grade_summary_data(request, course, get_grades=True, use_offline=use_offline)
    assignments = [[x] for x in allgrades['assignments']]
    datatable = {'header': [_('Assignment Name')]}
    datatable['data'] = assignments
    datatable['title'] = action
    msg += 'assignments=<pre>%s</pre>' % assignments

elif action == 'List enrolled students matching remote gradebook':
    stud_data = get_student_grade_summary_data(request, course, get_grades=False, use_offline=use_offline)
    msg2, rg_stud_data = _do_remote_gradebook(request.user, course, 'get-membership')
    datatable = {'header': ['Student email', 'Match?']}
    rg_students = [x['email'] for x in rg_stud_data['retdata']]

    def domatch(x):
        # Flag whether a locally-enrolled student also appears in the remote gradebook.
        return 'yes' if x.email in rg_students else 'No'
    datatable['data'] = [[x.email, domatch(x)] for x in stud_data['students']]
    datatable['title'] = action

elif action in ['Display grades for assignment', 'Export grades for assignment to remote gradebook',
                'Export CSV file of grades for assignment']:
    log.debug(action)
    datatable = {}
    aname = request.POST.get('assignment_name', '')
    if not aname:
        msg += "<font color='red'>{text}</font>".format(text=_("Please enter an assignment name"))
    else:
        allgrades = get_student_grade_summary_data(request, course, get_grades=True, use_offline=use_offline)
        if aname not in allgrades['assignments']:
            msg += "<font color='red'>{text}</font>".format(
                text=_("Invalid assignment name '{name}'").format(name=aname)
            )
        else:
            aidx = allgrades['assignments'].index(aname)
            datatable = {'header': [_('External email'), aname]}
            ddata = []
            for student in allgrades['students']:  # do one by one in case there is a student who has only partial grades
                try:
                    ddata.append([student.email, student.grades[aidx]])
                except IndexError:
                    log.debug('No grade for assignment {idx} ({name}) for student {email}'.format(
                        idx=aidx, name=aname, email=student.email)
                    )
            datatable['data'] = ddata
            datatable['title'] = _('Grades for assignment "{name}"').format(name=aname)

            if 'Export CSV' in action:
                # generate and return CSV file
                return return_csv('grades {name}.csv'.format(name=aname), datatable)
            elif 'remote gradebook' in action:
                # Round-trip through an in-memory CSV to post the grades remotely.
                file_pointer = StringIO()
                return_csv('', datatable, file_pointer=file_pointer)
                file_pointer.seek(0)
                files = {'datafile': file_pointer}
                msg2, __ = _do_remote_gradebook(request.user, course, 'post-grades', files=files)
                msg += msg2
#----------------------------------------
# Admin
elif 'List course staff' in action:
    role = CourseStaffRole(course.id)
    datatable = _role_members_table(role, _("List of Staff"), course_key)
    track.views.server_track(request, "list-staff", {}, page="idashboard")

elif 'List course instructors' in action and GlobalStaff().has_user(request.user):
    role = CourseInstructorRole(course.id)
    datatable = _role_members_table(role, _("List of Instructors"), course_key)
    track.views.server_track(request, "list-instructors", {}, page="idashboard")

elif action == 'Add course staff':
    uname = request.POST['staffuser']
    role = CourseStaffRole(course.id)
    msg += add_user_to_role(request, uname, role, 'staff', 'staff')

# Instructor role changes require global (Django) staff, not just course staff.
elif action == 'Add instructor' and request.user.is_staff:
    uname = request.POST['instructor']
    role = CourseInstructorRole(course.id)
    msg += add_user_to_role(request, uname, role, 'instructor', 'instructor')

elif action == 'Remove course staff':
    uname = request.POST['staffuser']
    role = CourseStaffRole(course.id)
    msg += remove_user_from_role(request, uname, role, 'staff', 'staff')

elif action == 'Remove instructor' and request.user.is_staff:
    uname = request.POST['instructor']
    role = CourseInstructorRole(course.id)
    msg += remove_user_from_role(request, uname, role, 'instructor', 'instructor')

#----------------------------------------
# DataDump
elif 'Download CSV of all student profile data' in action:
    enrolled_students = User.objects.filter(
        courseenrollment__course_id=course_key,
        courseenrollment__is_active=1,
    ).order_by('username').select_related("profile")
    profkeys = ['name', 'language', 'location', 'year_of_birth', 'gender', 'level_of_education',
                'mailing_address', 'goals']
    datatable = {'header': ['username', 'email'] + profkeys}

    def getdat(user):
        """
        Return a list of profile data for the given user.
        """
        profile = user.profile
        return [user.username, user.email] + [getattr(profile, xkey, '') for xkey in profkeys]

    datatable['data'] = [getdat(u) for u in enrolled_students]
    datatable['title'] = _('Student profile data for course {course_id}').format(
        course_id=course_key.to_deprecated_string()
    )
    return return_csv(
        'profiledata_{course_id}.csv'.format(course_id=course_key.to_deprecated_string()),
        datatable
    )
elif 'Download CSV of all responses to problem' in action:
problem_to_dump = request.POST.get('problem_to_dump', '')
if problem_to_dump[-4:] == ".xml":
problem_to_dump = problem_to_dump[:-4]
try:
module_state_key = course_key.make_usage_key(block_type='problem', name=problem_to_dump)
smdat = StudentModule.objects.filter(
course_id=course_key,
module_state_key=module_state_key
)
smdat = smdat.order_by('student')
msg += _("Found {num} records to dump.").format(num=smdat)
except Exception as err: # pylint: disable=broad-except
msg += "<font color='red'>{text}</font><pre>{err}</pre>".format(
text=_("Couldn't find module with that urlname."),
err=escape(err)
)
smdat = []
if smdat:
datatable = {'header': ['username', 'state']}
datatable['data'] = [[x.student.username, x.state] for x in smdat]
datatable['title'] = _('Student state for problem {problem}').format(problem=problem_to_dump)
return return_csv('student_state_from_{problem}.csv'.format(problem=problem_to_dump), datatable)
# --- Dump global and course-specific anonymized user IDs for all enrollees ---
elif 'Download CSV of all student anonymized IDs' in action:
    students = User.objects.filter(
        courseenrollment__course_id=course_key,
    ).order_by('id')
    # save=False: read-only lookup, don't persist newly-generated anonymous IDs.
    datatable = {'header': ['User ID', 'Anonymized User ID', 'Course Specific Anonymized User ID']}
    datatable['data'] = [[s.id, unique_id_for_user(s, save=False), anonymous_id_for_user(s, course_key, save=False)] for s in students]
    return return_csv(course_key.to_deprecated_string().replace('/', '-') + '-anon-ids.csv', datatable)
#----------------------------------------
# Group management
elif 'List beta testers' in action:
    role = CourseBetaTesterRole(course.id)
    datatable = _role_members_table(role, _("List of Beta Testers"), course_key)
    track.views.server_track(request, "list-beta-testers", {}, page="idashboard")

elif action == 'Add beta testers':
    users = request.POST['betausers']
    log.debug("users: {0!r}".format(users))
    role = CourseBetaTesterRole(course.id)
    for username_or_email in split_by_comma_and_whitespace(users):
        msg += "<p>{0}</p>".format(
            add_user_to_role(request, username_or_email, role, 'beta testers', 'beta-tester'))

elif action == 'Remove beta testers':
    users = request.POST['betausers']
    role = CourseBetaTesterRole(course.id)
    for username_or_email in split_by_comma_and_whitespace(users):
        msg += "<p>{0}</p>".format(
            remove_user_from_role(request, username_or_email, role, 'beta testers', 'beta-tester'))
#----------------------------------------
# forum administration
elif action == 'List course forum admins':
    rolename = FORUM_ROLE_ADMINISTRATOR
    datatable = {}
    # _list_course_forum_members fills datatable in place and returns a status string.
    msg += _list_course_forum_members(course_key, rolename, datatable)
    track.views.server_track(
        request, "list-forum-admins", {"course": course_key.to_deprecated_string()}, page="idashboard"
    )

elif action == 'Remove forum admin':
    uname = request.POST['forumadmin']
    msg += _update_forum_role_membership(uname, course, FORUM_ROLE_ADMINISTRATOR, FORUM_ROLE_REMOVE)
    track.views.server_track(
        request, "remove-forum-admin", {"username": uname, "course": course_key.to_deprecated_string()},
        page="idashboard"
    )

elif action == 'Add forum admin':
    uname = request.POST['forumadmin']
    msg += _update_forum_role_membership(uname, course, FORUM_ROLE_ADMINISTRATOR, FORUM_ROLE_ADD)
    track.views.server_track(
        request, "add-forum-admin", {"username": uname, "course": course_key.to_deprecated_string()},
        page="idashboard"
    )

elif action == 'List course forum moderators':
    rolename = FORUM_ROLE_MODERATOR
    datatable = {}
    msg += _list_course_forum_members(course_key, rolename, datatable)
    track.views.server_track(
        request, "list-forum-mods", {"course": course_key.to_deprecated_string()}, page="idashboard"
    )

elif action == 'Remove forum moderator':
    uname = request.POST['forummoderator']
    msg += _update_forum_role_membership(uname, course, FORUM_ROLE_MODERATOR, FORUM_ROLE_REMOVE)
    track.views.server_track(
        request, "remove-forum-mod", {"username": uname, "course": course_key.to_deprecated_string()},
        page="idashboard"
    )

elif action == 'Add forum moderator':
    uname = request.POST['forummoderator']
    msg += _update_forum_role_membership(uname, course, FORUM_ROLE_MODERATOR, FORUM_ROLE_ADD)
    track.views.server_track(
        request, "add-forum-mod", {"username": uname, "course": course_key.to_deprecated_string()},
        page="idashboard"
    )

elif action == 'List course forum community TAs':
    rolename = FORUM_ROLE_COMMUNITY_TA
    datatable = {}
    msg += _list_course_forum_members(course_key, rolename, datatable)
    track.views.server_track(
        request, "list-forum-community-TAs", {"course": course_key.to_deprecated_string()},
        page="idashboard"
    )

elif action == 'Remove forum community TA':
    uname = request.POST['forummoderator']
    msg += _update_forum_role_membership(uname, course, FORUM_ROLE_COMMUNITY_TA, FORUM_ROLE_REMOVE)
    track.views.server_track(
        request, "remove-forum-community-TA", {
            "username": uname, "course": course_key.to_deprecated_string()
        },
        page="idashboard"
    )

elif action == 'Add forum community TA':
    uname = request.POST['forummoderator']
    msg += _update_forum_role_membership(uname, course, FORUM_ROLE_COMMUNITY_TA, FORUM_ROLE_ADD)
    track.views.server_track(
        request, "add-forum-community-TA", {
            "username": uname, "course": course_key.to_deprecated_string()
        },
        page="idashboard"
    )
#----------------------------------------
# enrollment
elif action == 'List students who may enroll but may not have yet signed up':
    ceaset = CourseEnrollmentAllowed.objects.filter(course_id=course_key)
    datatable = {'header': ['StudentEmail']}
    datatable['data'] = [[x.email] for x in ceaset]
    datatable['title'] = action

elif action == 'Enroll multiple students':
    is_shib_course = uses_shib(course)
    students = request.POST.get('multiple_students', '')
    auto_enroll = bool(request.POST.get('auto_enroll'))
    email_students = bool(request.POST.get('email_students'))
    secure = request.is_secure()
    ret = _do_enroll_students(course, course_key, students, secure=secure, auto_enroll=auto_enroll, email_students=email_students, is_shib_course=is_shib_course)
    datatable = ret['datatable']

elif action == 'Unenroll multiple students':
    students = request.POST.get('multiple_students', '')
    email_students = bool(request.POST.get('email_students'))
    ret = _do_unenroll_students(course_key, students, email_students=email_students)
    datatable = ret['datatable']

elif action == 'List sections available in remote gradebook':
    msg2, datatable = _do_remote_gradebook(request.user, course, 'get-sections')
    msg += msg2

elif action in ['List students in section in remote gradebook',
                'Overload enrollment list using remote gradebook',
                'Merge enrollment list with remote gradebook']:
    section = request.POST.get('gradebook_section', '')
    msg2, datatable = _do_remote_gradebook(request.user, course, 'get-membership', dict(section=section))
    msg += msg2
    # The two non-"List" variants also enroll the remote section's members here;
    # "Overload" replaces the enrollment list, "Merge" adds to it.
    if not 'List' in action:
        students = ','.join([x['email'] for x in datatable['retdata']])
        overload = 'Overload' in action
        secure = request.is_secure()
        ret = _do_enroll_students(course, course_key, students, secure=secure, overload=overload)
        datatable = ret['datatable']
#----------------------------------------
# email
elif action == 'Send email':
    email_to_option = request.POST.get("to_option")
    email_subject = request.POST.get("subject")
    html_message = request.POST.get("message")
    if bulk_email_is_enabled_for_course(course_key):
        try:
            # Create the CourseEmail object.  This is saved immediately, so that
            # any transaction that has been pending up to this point will also be
            # committed.
            email = CourseEmail.create(
                course_key.to_deprecated_string(), request.user, email_to_option, email_subject, html_message
            )
            # Submit the task, so that the correct InstructorTask object gets created (for monitoring purposes)
            submit_bulk_course_email(request, course_key, email.id)  # pylint: disable=E1101
        except Exception as err:  # pylint: disable=broad-except
            # Catch any errors and deliver a message to the user
            error_msg = "Failed to send email! ({0})".format(err)
            msg += "<font color='red'>" + error_msg + "</font>"
            log.exception(error_msg)
        else:
            # If sending the task succeeds, deliver a success message to the user.
            if email_to_option == "all":
                text = _(
                    "Your email was successfully queued for sending. "
                    "Please note that for large classes, it may take up to an hour "
                    "(or more, if other courses are simultaneously sending email) "
                    "to send all emails."
                )
            else:
                text = _('Your email was successfully queued for sending.')
            # NOTE(review): email_msg is assigned only on this success path —
            # presumably initialized earlier in the view before being used in the
            # template context; verify.
            email_msg = '<div class="msg msg-confirm"><p class="copy">{text}</p></div>'.format(text=text)
    else:
        msg += "<font color='red'>Email is not enabled for this course.</font>"
elif "Show Background Email Task History" in action:
message, datatable = get_background_task_table(course_key, task_type='bulk_course_email')
msg += message
elif "Show Background Email Task History" in action:
message, datatable = get_background_task_table(course_key, task_type='bulk_course_email')
msg += message
#----------------------------------------
# psychometrics
elif action == 'Generate Histogram and IRT Plot':
    problem = request.POST['Problem']
    nmsg, plots = psychoanalyze.generate_plots_for_problem(problem)
    msg += nmsg
    track.views.server_track(request, "psychometrics-histogram-generation", {"problem": unicode(problem)}, page="idashboard")

# End of the action dispatch chain; in Psychometrics mode, also load the
# list of problems that have psychometric data for display.
if idash_mode == 'Psychometrics':
    problems = psychoanalyze.problems_with_psychometric_data(course_key)

#----------------------------------------
# analytics
def get_analytics_result(analytics_name):
    """Fetch one named analytic for this course from the analytics server.

    Returns the decoded JSON payload with key order preserved, or None on
    any network or HTTP failure.  Errors are logged and swallowed so a
    broken analytics server never breaks the dashboard.
    """
    url = settings.ANALYTICS_SERVER_URL + \
        u"get?aname={}&course_id={}&apikey={}".format(
            analytics_name, course_key.to_deprecated_string(), settings.ANALYTICS_API_KEY
        )
    try:
        res = requests.get(url)
    except Exception:  # pylint: disable=broad-except
        log.exception("Error trying to access analytics at %s", url)
        return None
    if res.status_code != codes.OK:
        log.error("Error fetching %s, code: %s, msg: %s",
                  url, res.status_code, res.content)
        return None
    # WARNING: do not use res.json because the preloaded json doesn't
    # preserve the order of the original record (hence OrderedDict).
    return json.loads(res.content, object_pairs_hook=OrderedDict)
# --- Gather analytics (only in Analytics mode) ---
analytics_results = {}
if idash_mode == 'Analytics':
    DASHBOARD_ANALYTICS = [
        # "StudentsAttemptedProblems",  # num students who tried given problem
        "StudentsDailyActivity",  # active students by day
        "StudentsDropoffPerDay",  # active students dropoff by day
        # "OverallGradeDistribution",  # overall point distribution for course
        "StudentsActive",  # num students active in time period (default = 1wk)
        "StudentsEnrolled",  # num students enrolled
        # "StudentsPerProblemCorrect",  # foreach problem, num students correct
        "ProblemGradeDistribution",  # foreach problem, grade distribution
    ]
    for analytic_name in DASHBOARD_ANALYTICS:
        analytics_results[analytic_name] = get_analytics_result(analytic_name)

#----------------------------------------
# Metrics
metrics_results = {}
if settings.FEATURES.get('CLASS_DASHBOARD') and idash_mode == 'Metrics':
    metrics_results['section_display_name'] = dashboard_data.get_section_display_name(course_key)
    metrics_results['section_has_problem'] = dashboard_data.get_array_section_has_problem(course_key)

#----------------------------------------
# offline grades?
if use_offline:
    msg += "<br/><font color='orange'>{text}</font>".format(
        text=_("Grades from {course_id}").format(
            course_id=offline_grades_available(course_key)
        )
    )

# generate list of pending background tasks
if settings.FEATURES.get('ENABLE_INSTRUCTOR_BACKGROUND_TASKS'):
    instructor_tasks = get_running_instructor_tasks(course_key)
else:
    instructor_tasks = None

# determine if this is a studio-backed course so we can provide a link to edit this course in studio
is_studio_course = modulestore().get_modulestore_type(course_key) != ModuleStoreEnum.Type.xml
studio_url = None
if is_studio_course:
    studio_url = get_cms_course_link(course)

email_editor = None
# HTML editor for email
if idash_mode == 'Email' and is_studio_course:
    # Render a throwaway HtmlDescriptor in studio_view mode to get a WYSIWYG
    # editor fragment pre-loaded with the current draft message.
    html_module = HtmlDescriptor(
        course.system,
        DictFieldData({'data': html_message}),
        ScopeIds(None, None, None, course_key.make_usage_key('html', 'dummy'))
    )
    fragment = html_module.render('studio_view')
    fragment = wrap_xblock(
        'LmsRuntime', html_module, 'studio_view', fragment, None,
        extra_data={"course-id": course_key.to_deprecated_string()},
        usage_id_serializer=lambda usage_id: quote_slashes(usage_id.to_deprecated_string())
    )
    email_editor = fragment.content

# Enable instructor email only if the following conditions are met:
# 1. Feature flag is on
# 2. We have explicitly enabled email for the given course via django-admin
# 3. It is NOT an XML course
if bulk_email_is_enabled_for_course(course_key):
    show_email_tab = True

# display course stats only if there is no other table to display:
course_stats = None
if not datatable:
    course_stats = get_course_stats_table()

# disable buttons for large courses
disable_buttons = False
max_enrollment_for_buttons = settings.FEATURES.get("MAX_ENROLLMENT_INSTR_BUTTONS")
if max_enrollment_for_buttons is not None:
    disable_buttons = enrollment_number > max_enrollment_for_buttons

#----------------------------------------
# context for rendering
# NOTE(review): email_to_option/email_subject/email_msg are only assigned in
# the 'Send email' branch above — presumably initialized earlier in the view;
# verify no NameError on other actions.
context = {
    'course': course,
    'staff_access': True,
    'admin_access': request.user.is_staff,
    'instructor_access': instructor_access,
    'forum_admin_access': forum_admin_access,
    'datatable': datatable,
    'course_stats': course_stats,
    'msg': msg,
    'modeflag': {idash_mode: 'selectedmode'},
    'studio_url': studio_url,
    'to_option': email_to_option,  # email
    'subject': email_subject,  # email
    'editor': email_editor,  # email
    'email_msg': email_msg,  # email
    'show_email_tab': show_email_tab,  # email
    'problems': problems,  # psychometrics
    'plots': plots,  # psychometrics
    'course_errors': modulestore().get_course_errors(course.id),
    'instructor_tasks': instructor_tasks,
    'offline_grade_log': offline_grades_available(course_key),
    'cohorts_ajax_url': reverse('cohorts', kwargs={'course_key': course_key.to_deprecated_string()}),
    'analytics_results': analytics_results,
    'disable_buttons': disable_buttons,
    'metrics_results': metrics_results,
}
context['standard_dashboard_url'] = reverse('instructor_dashboard', kwargs={'course_id': course_key.to_deprecated_string()})
return render_to_response('courseware/instructor_dashboard.html', context)
def _do_remote_gradebook(user, course, action, args=None, files=None):
    '''
    Perform a remote gradebook action for `course`.

    Arguments:
        user: requesting user; their email is posted to the gradebook server.
        course: course descriptor; must define `remote_gradebook` metadata.
        action: name of the remote gradebook action to submit.
        args (dict): optional extra POST parameters for the action.
        files: optional files to upload with the request.

    Returns:
        (msg, datatable) tuple: `msg` is an HTML status/error message, and
        `datatable` is a dict suitable for dashboard display ({} on error).
    '''
    rg = course.remote_gradebook
    if not rg:
        msg = _("No remote gradebook defined in course metadata")
        return msg, {}

    rgurl = settings.FEATURES.get('REMOTE_GRADEBOOK_URL', '')
    if not rgurl:
        msg = _("No remote gradebook url defined in settings.FEATURES")
        return msg, {}

    rgname = rg.get('name', '')
    if not rgname:
        msg = _("No gradebook name defined in course remote_gradebook metadata")
        return msg, {}

    if args is None:
        args = {}
    data = dict(submit=action, gradebook=rgname, user=user.email)
    data.update(args)

    resp = None
    try:
        # NOTE(review): verify=False disables TLS certificate checking for the
        # gradebook server; pre-existing behavior, kept as-is.
        resp = requests.post(rgurl, data=data, verify=False, files=files)
        retdict = json.loads(resp.content)
    except Exception as err:  # pylint: disable=broad-except
        msg = _("Failed to communicate with gradebook server at {url}").format(url=rgurl) + "<br/>"
        msg += _("Error: {err}").format(err=err)
        # Fix: `resp` is only bound if the POST itself succeeded. Guard the
        # access so a connection failure doesn't raise UnboundLocalError and
        # mask the original error.
        if resp is not None:
            msg += "<br/>resp={resp}".format(resp=resp.content)
        msg += "<br/>data={data}".format(data=data)
        return msg, {}

    msg = '<pre>{msg}</pre>'.format(msg=retdict['msg'].replace('\n', '<br/>'))
    retdata = retdict['data']  # a list of dicts
    if retdata:
        datatable = {'header': retdata[0].keys()}
        datatable['data'] = [x.values() for x in retdata]
        datatable['title'] = _('Remote gradebook response for {action}').format(action=action)
        datatable['retdata'] = retdata
    else:
        datatable = {}
    return msg, datatable
def _list_course_forum_members(course_key, rolename, datatable):
    """
    Fill `datatable` with forum membership information for one role, for
    display on the instructor dashboard.

    course_key = the CourseKey for a course
    rolename = one of "Administrator", "Moderator", "Community TA"

    Returns a message status string to append to the displayed message
    (an error string if the role is unknown).
    """
    # Populate headers/title up front so an unknown role still renders a
    # well-formed, empty table.
    datatable['header'] = [_('Username'), _('Full name'), _('Roles')]
    datatable['title'] = _('List of Forum {name}s in course {id}').format(
        name=rolename, id=course_key.to_deprecated_string()
    )
    datatable['data'] = []
    try:
        role = Role.objects.get(name=rolename, course_id=course_key)
    except Role.DoesNotExist:
        return '<font color="red">' + _('Error: unknown rolename "{rolename}"').format(rolename=rolename) + '</font>'

    members = role.users.all().order_by('username')
    log.debug('role={0}'.format(rolename))

    rows = []
    for member in members:
        role_names = [
            r.name for r in member.roles.filter(course_id=course_key).order_by('name')
        ]
        rows.append([member.username, member.profile.name, ', '.join(role_names)])
    datatable['data'] = rows
    return 'Role = {0}'.format(rolename)
def _update_forum_role_membership(uname, course, rolename, add_or_remove):
    '''
    Add or remove a user from one of a course's forum roles.

    uname = username string for user
    course = course object
    rolename = one of "Administrator", "Moderator", "Community TA"
    add_or_remove = one of "add" or "remove"

    Returns an HTML message string to append to the displayed message.
    A status/error string is returned if the user or role is unknown, if the
    entry already exists when adding, or if it doesn't exist when removing.
    '''
    # check that username and rolename are valid:
    try:
        user = User.objects.get(username=uname)
    except User.DoesNotExist:
        return '<font color="red">' + _('Error: unknown username "{username}"').format(username=uname) + '</font>'
    try:
        role = Role.objects.get(name=rolename, course_id=course.id)
    except Role.DoesNotExist:
        return '<font color="red">' + _('Error: unknown rolename "{rolename}"').format(rolename=rolename) + '</font>'

    # check whether role already has the specified user:
    alreadyexists = role.users.filter(username=uname).exists()

    msg = ''
    log.debug('rolename={0}'.format(rolename))
    if add_or_remove == FORUM_ROLE_REMOVE:
        # Removing: the user must currently hold the role.
        if not alreadyexists:
            msg = '<font color="red">' + _('Error: user "{username}" does not have rolename "{rolename}", cannot remove').format(username=uname, rolename=rolename) + '</font>'
        else:
            user.roles.remove(role)
            msg = '<font color="green">' + _('Removed "{username}" from "{course_id}" forum role = "{rolename}"').format(username=user, course_id=course.id.to_deprecated_string(), rolename=rolename) + '</font>'
    else:
        # Adding: the user must not already hold the role.
        if alreadyexists:
            msg = '<font color="red">' + _('Error: user "{username}" already has rolename "{rolename}", cannot add').format(username=uname, rolename=rolename) + '</font>'
        else:
            # Forum administrators must already be course staff.
            if (rolename == FORUM_ROLE_ADMINISTRATOR and not has_access(user, 'staff', course)):
                msg = '<font color="red">' + _('Error: user "{username}" should first be added as staff before adding as a forum administrator, cannot add').format(username=uname) + '</font>'
            else:
                user.roles.add(role)
                msg = '<font color="green">' + _('Added "{username}" to "{course_id}" forum role = "{rolename}"').format(username=user, course_id=course.id.to_deprecated_string(), rolename=rolename) + '</font>'
    return msg
def _role_members_table(role, title, course_key):
    """
    Build a datatable of the usernames and full names of users holding `role`.

    Arguments:
        role -- a student.roles.AccessRole
        title -- a descriptive title to show the user
        course_key -- key of the course, used in the table title

    Returns:
        a dictionary with keys
            'header': ['Username', 'Full name'],
            'data': [[username, name] for all users]
            'title': "{title} in course {course}"
    """
    members = role.users_with_role()
    return {
        'header': [_('Username'), _('Full name')],
        'data': [[member.username, member.profile.name] for member in members],
        'title': _('{title} in course {course_key}').format(title=title, course_key=course_key.to_deprecated_string()),
    }
def _user_from_name_or_email(username_or_email):
    """
    Return the `django.contrib.auth.User` with the supplied username or email.

    If `username_or_email` contains an `@` it is treated as an email, otherwise
    it is treated as the username.
    """
    identifier = strip_if_string(username_or_email)
    # Presence of '@' decides which unique field to query on.
    lookup = {'email': identifier} if '@' in identifier else {'username': identifier}
    return User.objects.get(**lookup)
def add_user_to_role(request, username_or_email, role, group_title, event_name):
    """
    Look up the given user by username (if no '@') or email (otherwise), and add them to the role.

    Arguments:
        request: django request--used for tracking log
        username_or_email: who to add. Decide if it's an email by presense of an '@'
        role: a student.roles.AccessRole the user is added to
        group_title: what to call this group in messages to user--e.g. "beta-testers".
        event_name: what to call this event when logging to tracking logs.

    Returns:
        html to insert in the message field
    """
    username_or_email = strip_if_string(username_or_email)
    try:
        user = _user_from_name_or_email(username_or_email)
    except User.DoesNotExist:
        return u'<font color="red">Error: unknown username or email "{0}"</font>'.format(username_or_email)

    role.add_users(user)

    # Deal with historical event names: 'staff' and 'beta-tester' changes are
    # logged under the generic add-or-remove event; anything else as add-instructor.
    if event_name in ('staff', 'beta-tester'):
        track.views.server_track(
            request,
            "add-or-remove-user-group",
            {
                "event_name": event_name,
                "user": unicode(user),
                "event": "add"
            },
            page="idashboard"
        )
    else:
        track.views.server_track(request, "add-instructor", {"instructor": unicode(user)}, page="idashboard")

    return '<font color="green">Added {0} to {1}</font>'.format(user, group_title)
def remove_user_from_role(request, username_or_email, role, group_title, event_name):
    """
    Look up the given user by username (if no '@') or email (otherwise), and remove them from the supplied role.

    Arguments:
        request: django request--used for tracking log
        username_or_email: who to remove. Decide if it's an email by presense of an '@'
        role: A student.roles.AccessRole
        group_title: what to call this group in messages to user--e.g. "beta-testers".
        event_name: what to call this event when logging to tracking logs.

    Returns:
        html to insert in the message field
    """
    username_or_email = strip_if_string(username_or_email)
    try:
        user = _user_from_name_or_email(username_or_email)
    except User.DoesNotExist:
        return u'<font color="red">Error: unknown username or email "{0}"</font>'.format(username_or_email)

    role.remove_users(user)

    # Deal with historical event names: 'staff' and 'beta-tester' changes are
    # logged under the generic add-or-remove event; anything else as remove-instructor.
    if event_name in ('staff', 'beta-tester'):
        track.views.server_track(
            request,
            "add-or-remove-user-group",
            {
                "event_name": event_name,
                "user": unicode(user),
                "event": "remove"
            },
            page="idashboard"
        )
    else:
        track.views.server_track(request, "remove-instructor", {"instructor": unicode(user)}, page="idashboard")

    return '<font color="green">Removed {0} from {1}</font>'.format(user, group_title)
class GradeTable(object):
    """
    Keep track of grades, by student, for all graded assignment
    components. Each student's grades are stored in a dict keyed by
    component index. The set of components grows as grades stream in,
    because at the start it is unknown which assignment components
    exist (some students may not do all of them).

    The per-student rows are stored in `self.grades`, keyed by student id.
    """
    def __init__(self):
        # Maps component name -> column index, in first-seen order.
        self.components = OrderedDict()
        # Maps student_id -> {component_index: score}.
        self.grades = {}
        self._current_row = {}

    def _add_grade_to_row(self, component, score):
        """Creates component if needed, and assigns score

        Args:
            component (str): Course component being graded
            score (float): Score of student on component

        Returns:
            None
        """
        component_index = self.components.setdefault(component, len(self.components))
        self._current_row[component_index] = score

    @contextmanager
    def add_row(self, student_id):
        """Context management for a row of grades

        Uses a new dictionary to get all grades of a specified student
        and closes by adding that dict to the internal table.

        Args:
            student_id (str): Student id that is having grades set
        """
        self._current_row = {}
        yield self._add_grade_to_row
        self.grades[student_id] = self._current_row

    def get_grade(self, student_id):
        """Retrieves padded list of grades for specified student

        Args:
            student_id (str): Student ID for desired grades

        Returns:
            list: Ordered list of grades for student (None for components
            the student has no score for)
        """
        # Fix: default to an empty dict, not a list -- the row is a mapping
        # keyed by component index, and the .get() below requires a mapping.
        # (The previous [] default raised AttributeError for unknown students.)
        row = self.grades.get(student_id, {})
        ncomp = len(self.components)
        return [row.get(comp, None) for comp in range(ncomp)]

    def get_graded_components(self):
        """
        Return a list of components that have been
        discovered so far.
        """
        return self.components.keys()
def get_student_grade_summary_data(request, course, get_grades=True, get_raw_scores=False, use_offline=False):
    """
    Return data arrays with student identity and grades for specified course.

    course = CourseDescriptor
    course_key = course ID

    Note: both are passed in, only because instructor_dashboard already has them already.

    returns datatable = dict(header=header, data=data)
    where
    header = list of strings labeling the data fields
    data = list (one per student) of lists of data corresponding to the fields

    If get_raw_scores=True, then instead of grade summaries, the raw grades for all graded modules are returned.
    """
    course_key = course.id
    enrolled_students = User.objects.filter(
        courseenrollment__course_id=course_key,
        courseenrollment__is_active=1,
    ).prefetch_related("groups").order_by('username')

    header = [_('ID'), _('Username'), _('Full Name'), _('edX email'), _('External email')]

    datatable = {'header': header, 'students': enrolled_students}
    data = []

    gtab = GradeTable()

    for student in enrolled_students:
        datarow = [student.id, student.username, student.profile.name, student.email]
        try:
            datarow.append(student.externalauthmap.external_email)
        except Exception:  # ExternalAuthMap.DoesNotExist
            # Fix: narrowed from a bare `except:` (which also caught
            # KeyboardInterrupt/SystemExit). Students without an external
            # auth mapping simply get a blank external email.
            datarow.append('')

        if get_grades:
            gradeset = student_grades(student, request, course, keep_raw_scores=get_raw_scores, use_offline=use_offline)
            log.debug('student={0}, gradeset={1}'.format(student, gradeset))
            with gtab.add_row(student.id) as add_grade:
                if get_raw_scores:
                    # TODO (ichuang) encode Score as dict instead of as list, so score[0] -> score['earned']
                    for score in gradeset['raw_scores']:
                        add_grade(score.section, getattr(score, 'earned', score[0]))
                else:
                    for grade_item in gradeset['section_breakdown']:
                        add_grade(grade_item['label'], grade_item['percent'])
            student.grades = gtab.get_grade(student.id)

        data.append(datarow)

    # if getting grades, need to do a second pass, and add grades to each datarow;
    # on the first pass we don't know all the graded components
    if get_grades:
        for datarow in data:
            # get grades for student (padded to the full set of components)
            sgrades = gtab.get_grade(datarow[0])
            datarow += sgrades

        # get graded components and add to table header
        assignments = gtab.get_graded_components()
        header += assignments
        datatable['assignments'] = assignments

    datatable['data'] = data
    return datatable
#-----------------------------------------------------------------------------
# Gradebook has moved to instructor.api.spoc_gradebook #
@cache_control(no_cache=True, no_store=True, must_revalidate=True)
def grade_summary(request, course_key):
    """Display the grade summary page for a course (staff only)."""
    course = get_course_with_access(request.user, 'staff', course_key)

    # For now, just a page
    context = {
        'course': course,
        'staff_access': True,
    }
    return render_to_response('courseware/grade_summary.html', context)
#-----------------------------------------------------------------------------
# enrollment
def _do_enroll_students(course, course_key, students, secure=False, overload=False, auto_enroll=False, email_students=False, is_shib_course=False):
    """
    Do the actual work of enrolling multiple students, presented as a string
    of emails separated by commas or returns

    `course` is course object
    `course_key` id of course (a CourseKey)
    `students` string of student emails separated by commas or returns (a `str`)
    `secure` if True, use https rather than http in generated links (a `boolean`)
    `overload` un-enrolls all existing students (a `boolean`)
    `auto_enroll` is user input preference (a `boolean`)
    `email_students` is user input preference (a `boolean`)
    `is_shib_course` whether the course uses Shibboleth enrollment (a `boolean`)

    Returns a dict with keys 'added', 'rejected', 'deleted' (lists of email
    addresses) and 'datatable' (a per-student action table for display).
    """

    new_students, new_students_lc = get_and_clean_student_list(students)
    status = dict([x, 'unprocessed'] for x in new_students)

    if overload:  # delete all but staff
        todelete = CourseEnrollment.objects.filter(course_id=course_key)
        for ce in todelete:
            if not has_access(ce.user, 'staff', course) and ce.user.email.lower() not in new_students_lc:
                status[ce.user.email] = 'deleted'
                ce.deactivate()
            else:
                status[ce.user.email] = 'is staff'
        ceaset = CourseEnrollmentAllowed.objects.filter(course_id=course_key)
        for cea in ceaset:
            status[cea.email] = 'removed from pending enrollment list'
        ceaset.delete()

    if email_students:
        protocol = 'https' if secure else 'http'
        stripped_site_name = microsite.get_value(
            'SITE_NAME',
            settings.SITE_NAME
        )
        # TODO: Use request.build_absolute_uri rather than '{proto}://{site}{path}'.format
        # and check with the Services team that this works well with microsites
        registration_url = '{proto}://{site}{path}'.format(
            proto=protocol,
            site=stripped_site_name,
            path=reverse('student.views.register_user')
        )
        course_url = '{proto}://{site}{path}'.format(
            proto=protocol,
            site=stripped_site_name,
            path=reverse('course_root', kwargs={'course_id': course_key.to_deprecated_string()})
        )
        # We can't get the url to the course's About page if the marketing site is enabled.
        course_about_url = None
        if not settings.FEATURES.get('ENABLE_MKTG_SITE', False):
            course_about_url = u'{proto}://{site}{path}'.format(
                proto=protocol,
                site=stripped_site_name,
                path=reverse('about_course', kwargs={'course_id': course_key.to_deprecated_string()})
            )

        # Composition of email
        d = {
            'site_name': stripped_site_name,
            'registration_url': registration_url,
            'course': course,
            'auto_enroll': auto_enroll,
            'course_url': course_url,
            'course_about_url': course_about_url,
            'is_shib_course': is_shib_course
        }

    for student in new_students:
        try:
            user = User.objects.get(email=student)
        except User.DoesNotExist:

            # Student not signed up yet, put in pending enrollment allowed table
            cea = CourseEnrollmentAllowed.objects.filter(email=student, course_id=course_key)

            # If enrollmentallowed already exists, update auto_enroll flag to however it was set in UI
            # Will be 0 or 1 records as there is a unique key on email + course_id
            if cea:
                cea[0].auto_enroll = auto_enroll
                cea[0].save()
                status[student] = 'user does not exist, enrollment already allowed, pending with auto enrollment ' \
                    + ('on' if auto_enroll else 'off')
                continue

            # EnrollmentAllowed doesn't exist so create it
            cea = CourseEnrollmentAllowed(email=student, course_id=course_key, auto_enroll=auto_enroll)
            cea.save()

            status[student] = 'user does not exist, enrollment allowed, pending with auto enrollment ' \
                + ('on' if auto_enroll else 'off')

            if email_students:
                # User is allowed to enroll but has not signed up yet
                d['email_address'] = student
                d['message'] = 'allowed_enroll'
                send_mail_ret = send_mail_to_student(student, d)
                status[student] += (', email sent' if send_mail_ret else '')
            continue

        # Student has already registered
        if CourseEnrollment.is_enrolled(user, course_key):
            status[student] = 'already enrolled'
            continue

        try:
            # Not enrolled yet
            CourseEnrollment.enroll(user, course_key)
            status[student] = 'added'

            if email_students:
                # User enrolled for first time, populate dict with user specific info
                d['email_address'] = student
                d['full_name'] = user.profile.name
                d['message'] = 'enrolled_enroll'
                send_mail_ret = send_mail_to_student(student, d)
                status[student] += (', email sent' if send_mail_ret else '')
        except Exception:  # pylint: disable=broad-except
            # Fix: narrowed from a bare `except:` so SystemExit/KeyboardInterrupt
            # are no longer swallowed; enrollment failures still mark 'rejected'.
            status[student] = 'rejected'

    datatable = {'header': ['StudentEmail', 'action']}
    datatable['data'] = [[x, status[x]] for x in sorted(status)]
    datatable['title'] = _('Enrollment of students')

    def sf(stat):
        """Return the list of emails whose status equals `stat`."""
        return [x for x in status if status[x] == stat]

    data = dict(added=sf('added'), rejected=sf('rejected') + sf('exists'),
                deleted=sf('deleted'), datatable=datatable)
    return data
#Unenrollment
def _do_unenroll_students(course_key, students, email_students=False):
    """
    Do the actual work of un-enrolling multiple students, presented as a string
    of emails separated by commas or returns

    `course_key` is id of course (a CourseKey)
    `students` is string of student emails separated by commas or returns (a `str`)
    `email_students` is user input preference (a `boolean`)

    Returns a dict {'datatable': ...} describing, per email address, what
    action was taken ("un-enrolled", error text, etc.).
    """
    old_students, __ = get_and_clean_student_list(students)
    status = dict([x, 'unprocessed'] for x in old_students)

    stripped_site_name = microsite.get_value(
        'SITE_NAME',
        settings.SITE_NAME
    )
    if email_students:
        course = modulestore().get_course(course_key)
        # Composition of email: shared parameters for every notification sent below.
        d = {'site_name': stripped_site_name,
             'course': course}

    for student in old_students:

        isok = False

        # Remove any pending-enrollment record first.
        cea = CourseEnrollmentAllowed.objects.filter(course_id=course_key, email=student)
        # Will be 0 or 1 records as there is a unique key on email + course_id
        if cea:
            cea[0].delete()
            status[student] = "un-enrolled"
            isok = True

        try:
            user = User.objects.get(email=student)
        except User.DoesNotExist:
            if isok and email_students:
                # User was allowed to join but had not signed up yet
                d['email_address'] = student
                d['message'] = 'allowed_unenroll'
                send_mail_ret = send_mail_to_student(student, d)
                status[student] += (', email sent' if send_mail_ret else '')
            continue

        # Will be 0 or 1 records as there is a unique key on user + course_id
        if CourseEnrollment.is_enrolled(user, course_key):
            try:
                CourseEnrollment.unenroll(user, course_key)
                status[student] = "un-enrolled"

                if email_students:
                    # User was enrolled
                    d['email_address'] = student
                    d['full_name'] = user.profile.name
                    d['message'] = 'enrolled_unenroll'
                    send_mail_ret = send_mail_to_student(student, d)
                    status[student] += (', email sent' if send_mail_ret else '')
            except Exception:  # pylint: disable=broad-except
                # Only report a failure if the pending-record removal above
                # didn't already succeed for this address.
                if not isok:
                    status[student] = "Error! Failed to un-enroll"

    datatable = {'header': ['StudentEmail', 'action']}
    datatable['data'] = [[x, status[x]] for x in sorted(status)]
    datatable['title'] = _('Un-enrollment of students')

    data = dict(datatable=datatable)
    return data
def send_mail_to_student(student, param_dict):
    """
    Construct the email using templates and then send it.

    `student` is the student's email address (a `str`),

    `param_dict` is a `dict` with keys [
    `site_name`: name given to edX instance (a `str`)
    `registration_url`: url for registration (a `str`)
    `course_key`: id of course (a CourseKey)
    `auto_enroll`: user input option (a `str`)
    `course_url`: url of course (a `str`)
    `email_address`: email of student (a `str`)
    `full_name`: student full name (a `str`)
    `message`: type of email to send and template to use (a `str`)
    `is_shib_course`: (a `boolean`)
    ]

    Returns a boolean indicating whether the email was sent successfully.
    """
    # add some helpers and microconfig subsitutions
    if 'course' in param_dict:
        param_dict['course_name'] = param_dict['course'].display_name_with_default
    param_dict['site_name'] = microsite.get_value(
        'SITE_NAME',
        param_dict.get('site_name', '')
    )

    # Map each message type to its (subject, body) template pair.
    templates_by_type = {
        'allowed_enroll': ('emails/enroll_email_allowedsubject.txt', 'emails/enroll_email_allowedmessage.txt'),
        'enrolled_enroll': ('emails/enroll_email_enrolledsubject.txt', 'emails/enroll_email_enrolledmessage.txt'),
        'allowed_unenroll': ('emails/unenroll_email_subject.txt', 'emails/unenroll_email_allowedmessage.txt'),
        'enrolled_unenroll': ('emails/unenroll_email_subject.txt', 'emails/unenroll_email_enrolledmessage.txt'),
    }
    subject_template, message_template = templates_by_type.get(param_dict['message'], (None, None))
    if subject_template is None or message_template is None:
        # Unknown message type: nothing to send.
        return False

    subject = render_to_string(subject_template, param_dict)
    message = render_to_string(message_template, param_dict)
    if not (subject and message):
        return False

    # Remove leading and trailing whitespace from body
    message = message.strip()
    # Email subject *must not* contain newlines
    subject = ''.join(subject.splitlines())
    from_address = microsite.get_value(
        'email_from_address',
        settings.DEFAULT_FROM_EMAIL
    )
    send_mail(subject, message, from_address, [student], fail_silently=False)
    return True
def get_and_clean_student_list(students):
    """
    Separate out individual student email from the comma, or space separated string.

    `students` is string of student emails separated by commas or returns (a `str`)

    Returns:
      students: list of cleaned student emails
      students_lc: list of lower case cleaned student emails
    """
    stripped = [unicode(token.strip()) for token in split_by_comma_and_whitespace(students)]
    cleaned = [email for email in stripped if email]
    return cleaned, [email.lower() for email in cleaned]
#-----------------------------------------------------------------------------
# answer distribution
def get_answers_distribution(request, course_key):
    """
    Get the distribution of answers for all graded problems in the course.

    Return a dict with two keys:
    'header': a header row
    'data': a list of rows
    """
    course = get_course_with_access(request.user, 'staff', course_key)
    dist = grades.answer_distributions(course.id)

    rows = []
    # One output row per (problem, answer) pair, in sorted problem order.
    for (url_name, display_name, answer_id), answers in sorted(dist.items()):
        for answer in answers:
            rows.append([url_name, display_name, answer_id, answer, answers[answer]])

    return {
        'header': ['url_name', 'display name', 'answer id', 'answer', 'count'],
        'data': rows,
    }
#-----------------------------------------------------------------------------
def compute_course_stats(course):
    """
    Compute course statistics, including number of problems, videos, html.

    course is a CourseDescriptor from the xmodule system.
    """
    # Walk the course tree iteratively, tallying each module by its
    # descriptor class name (HtmlDescriptor, CapaDescriptor, ...).
    counts = defaultdict(int)
    pending = [course]
    while pending:
        node = pending.pop()
        counts[node.__class__.__name__] += 1
        pending.extend(node.get_children())
    return dict(counts)  # number of each kind of module
def dump_grading_context(course):
    """
    Dump information about course grading context (eg which problems are graded in what assignments)
    Very useful for debugging grading_policy.json and policy.json

    Returns an HTML string (a `<pre>` block) summarizing the course grader
    and each graded section.
    """
    msg = "-----------------------------------------------------------------------------\n"
    msg += "Course grader:\n"

    msg += '%s\n' % course.grader.__class__
    graders = {}
    if isinstance(course.grader, xmgraders.WeightedSubsectionsGrader):
        msg += '\n'
        msg += "Graded sections:\n"
        for subgrader, category, weight in course.grader.sections:
            msg += " subgrader=%s, type=%s, category=%s, weight=%s\n" % (subgrader.__class__, subgrader.type, category, weight)
            subgrader.index = 1
            graders[subgrader.type] = subgrader
    msg += "-----------------------------------------------------------------------------\n"
    msg += "Listing grading context for course %s\n" % course.id

    gcontext = course.grading_context
    msg += "graded sections:\n"

    msg += '%s\n' % gcontext['graded_sections'].keys()
    for (gsections, gsvals) in gcontext['graded_sections'].items():
        msg += "--> Section %s:\n" % (gsections)
        for sec in gsvals:
            sdesc = sec['section_descriptor']
            grade_format = getattr(sdesc, 'grade_format', None)
            aname = ''
            if grade_format in graders:
                gfmt = graders[grade_format]
                aname = '%s %02d' % (gfmt.short_label, gfmt.index)
                gfmt.index += 1
            elif sdesc.display_name in graders:
                gfmt = graders[sdesc.display_name]
                aname = '%s' % gfmt.short_label
            notes = ''
            if getattr(sdesc, 'score_by_attempt', False):
                notes = ', score by attempt!'
            # Fix: use `sdesc` (the section descriptor bound above) -- the
            # previous code referenced an undefined name `s`, which raised
            # NameError for any course with graded sections.
            msg += " %s (grade_format=%s, Assignment=%s%s)\n" % (sdesc.display_name, grade_format, aname, notes)
    msg += "all descriptors:\n"
    msg += "length=%d\n" % len(gcontext['all_descriptors'])
    # Fix: escape '<' so descriptor reprs don't break the HTML <pre> block;
    # the previous replace('<', '<') was a no-op.
    msg = '<pre>%s</pre>' % msg.replace('<', '&lt;')
    return msg
def get_background_task_table(course_key, problem_url=None, student=None, task_type=None):
    """
    Construct the "datatable" structure to represent background task history.

    Filters the background task history to the specified course and problem.
    If a student is provided, filters to only those tasks for which that student
    was specified.

    Returns a tuple of (msg, datatable), where the msg is a possible error message,
    and the datatable is the datatable to be used for display.
    """
    history_entries = get_instructor_task_history(course_key, problem_url, student, task_type)
    datatable = {}
    msg = ""
    # first check to see if there is any history at all
    # (note that we don't have to check that the arguments are valid; it
    # just won't find any entries.)
    if (history_entries.count()) == 0:
        # NOTE(review): this first message is not wrapped in _() like the two
        # below, so it is untranslated -- confirm whether that's intentional.
        if problem_url is None:
            msg += '<font color="red">Failed to find any background tasks for course "{course}".</font>'.format(
                course=course_key.to_deprecated_string()
            )
        elif student is not None:
            template = '<font color="red">' + _('Failed to find any background tasks for course "{course}", module "{problem}" and student "{student}".') + '</font>'
            msg += template.format(course=course_key.to_deprecated_string(), problem=problem_url, student=student.username)
        else:
            msg += '<font color="red">' + _('Failed to find any background tasks for course "{course}" and module "{problem}".').format(
                course=course_key.to_deprecated_string(), problem=problem_url
            ) + '</font>'
    else:
        datatable['header'] = ["Task Type",
                               "Task Id",
                               "Requester",
                               "Submitted",
                               "Duration (sec)",
                               "Task State",
                               "Task Status",
                               "Task Output"]

        datatable['data'] = []
        for instructor_task in history_entries:
            # get duration info, if known:
            duration_sec = 'unknown'
            if hasattr(instructor_task, 'task_output') and instructor_task.task_output is not None:
                task_output = json.loads(instructor_task.task_output)
                if 'duration_ms' in task_output:
                    duration_sec = int(task_output['duration_ms'] / 1000.0)
            # get progress status message:
            success, task_message = get_task_completion_info(instructor_task)
            status = "Complete" if success else "Incomplete"
            # generate row for this task:
            row = [
                str(instructor_task.task_type),
                str(instructor_task.task_id),
                str(instructor_task.requester),
                instructor_task.created.isoformat(' '),
                duration_sec,
                str(instructor_task.task_state),
                status,
                task_message
            ]
            datatable['data'].append(row)

        # Title reflects how narrowly the history was filtered.
        if problem_url is None:
            datatable['title'] = "{course_id}".format(course_id=course_key.to_deprecated_string())
        elif student is not None:
            datatable['title'] = "{course_id} > {location} > {student}".format(
                course_id=course_key.to_deprecated_string(),
                location=problem_url,
                student=student.username
            )
        else:
            datatable['title'] = "{course_id} > {location}".format(
                course_id=course_key.to_deprecated_string(), location=problem_url
            )
    return msg, datatable
def uses_shib(course):
    """
    Used to return whether course has Shibboleth as the enrollment domain

    Returns a boolean indicating if Shibboleth authentication is set for this course.
    """
    domain = course.enrollment_domain
    # Preserves the original short-circuit: a falsy domain is returned as-is.
    return domain and domain.startswith(SHIBBOLETH_DOMAIN_PREFIX)
| agpl-3.0 |
ABaldwinHunter/django-clone | django/conf/locale/az/formats.py | 1059 | 1267 | # -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
from __future__ import unicode_literals
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
# NOTE(review): the 'г.' suffix below is a Cyrillic abbreviation (Russian
# "года", i.e. "year"); these formats look copied from the ru locale --
# confirm they are intended for this locale file.
DATE_FORMAT = 'j E Y г.'
TIME_FORMAT = 'G:i'
DATETIME_FORMAT = 'j E Y г. G:i'
YEAR_MONTH_FORMAT = 'F Y г.'
MONTH_DAY_FORMAT = 'j F'
SHORT_DATE_FORMAT = 'd.m.Y'
SHORT_DATETIME_FORMAT = 'd.m.Y H:i'
FIRST_DAY_OF_WEEK = 1  # Monday

# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = [
    '%d.%m.%Y', # '25.10.2006'
    '%d.%m.%y', # '25.10.06'
]
DATETIME_INPUT_FORMATS = [
    '%d.%m.%Y %H:%M:%S', # '25.10.2006 14:30:59'
    '%d.%m.%Y %H:%M:%S.%f', # '25.10.2006 14:30:59.000200'
    '%d.%m.%Y %H:%M', # '25.10.2006 14:30'
    '%d.%m.%Y', # '25.10.2006'
    '%d.%m.%y %H:%M:%S', # '25.10.06 14:30:59'
    '%d.%m.%y %H:%M:%S.%f', # '25.10.06 14:30:59.000200'
    '%d.%m.%y %H:%M', # '25.10.06 14:30'
    '%d.%m.%y', # '25.10.06'
]
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = '\xa0' # non-breaking space
NUMBER_GROUPING = 3
| bsd-3-clause |
sclabs/sccms-nonrel | django/utils/crypto.py | 245 | 1443 | """
Django's standard crypto functions and utilities.
"""
import hmac
from django.conf import settings
from django.utils.hashcompat import sha_constructor, sha_hmac
def salted_hmac(key_salt, value, secret=None):
    """
    Returns the HMAC-SHA1 of 'value', using a key generated from key_salt and a
    secret (which defaults to settings.SECRET_KEY).

    A different key_salt should be passed in for every application of HMAC.
    """
    if secret is None:
        secret = settings.SECRET_KEY

    # Derive a per-use key by hashing the salt together with the base secret.
    # If len(key_salt + secret) > the hash block size, hmac would hash the key
    # itself anyway, but deriving here guarantees we *always* do this step.
    derived_key = sha_constructor(key_salt + secret).digest()
    return hmac.new(derived_key, msg=value, digestmod=sha_hmac)
def constant_time_compare(val1, val2):
    """
    Returns True if the two strings are equal, False otherwise.

    The time taken is independent of the number of characters that match:
    every character pair is XOR-ed and OR-folded before the final check.
    """
    if len(val1) != len(val2):
        return False
    mismatch = 0
    for ch1, ch2 in zip(val1, val2):
        mismatch |= ord(ch1) ^ ord(ch2)
    return mismatch == 0
| bsd-3-clause |
arbrandes/edx-platform | cms/djangoapps/api/v1/tests/test_views/test_course_runs.py | 4 | 17096 | """Tests for Course run views"""
import datetime
from unittest.mock import patch # lint-amnesty, pylint: disable=unused-import
import ddt
import pytz
from django.core.files.uploadedfile import SimpleUploadedFile
from django.test import RequestFactory, override_settings
from django.urls import reverse
from opaque_keys.edx.keys import CourseKey
from organizations.api import add_organization, get_course_organizations
from rest_framework.test import APIClient
from common.djangoapps.student.models import CourseAccessRole
from common.djangoapps.student.tests.factories import TEST_PASSWORD, AdminFactory, UserFactory
from openedx.core.lib.courses import course_image_url
from xmodule.contentstore.content import StaticContent
from xmodule.contentstore.django import contentstore
from xmodule.exceptions import NotFoundError
from xmodule.modulestore.django import modulestore
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory, ToyCourseFactory
from ...serializers.course_runs import CourseRunSerializer
from ..utils import serialize_datetime
@ddt.ddt
class CourseRunViewSetTests(ModuleStoreTestCase):
    """
    Tests for creating course runs
    """
    # Collection endpoint for course runs; detail/rerun/images URLs are
    # reversed per-test from the run's course key.
    list_url = reverse('api:v1:course_run-list')

    def setUp(self):
        # Every test starts authenticated as a staff admin; individual tests
        # log out or re-login as a non-staff user where needed.
        super().setUp()
        self.client = APIClient()
        user = AdminFactory()
        self.client.login(username=user.username, password=TEST_PASSWORD)

    def get_course_run_data(self, user, start, end, pacing_type, role='instructor'):
        """Return a complete POST payload for creating a course run."""
        return {
            'title': 'Testing 101',
            'org': 'TestingX',
            'number': 'Testing101x',
            'run': '3T2017',
            'schedule': {
                'start': serialize_datetime(start),
                'end': serialize_datetime(end),
            },
            'team': [
                {
                    'user': user.username,
                    'role': role,
                }
            ],
            'pacing_type': pacing_type,
        }

    def assert_course_run_schedule(self, course_run, start, end):
        """Assert the stored start/end dates match the expected values."""
        assert course_run.start == start
        assert course_run.end == end

    def assert_access_role(self, course_run, user, role):
        # An error will be raised if the endpoint did not create the role
        assert CourseAccessRole.objects.filter(
            course_id=course_run.id, org=course_run.id.org, user=user, role=role).count() == 1

    def assert_course_access_role_count(self, course_run, expected):
        """Assert the total number of access roles attached to this run."""
        assert CourseAccessRole.objects.filter(course_id=course_run.id).count() == expected

    def get_serializer_context(self):
        # Serializers need a request to build absolute URLs (e.g. image links).
        return {'request': RequestFactory().get('')}

    def test_without_authentication(self):
        """Anonymous requests are rejected with 401."""
        self.client.logout()
        response = self.client.get(self.list_url)
        assert response.status_code == 401

    def test_without_authorization(self):
        """Authenticated but non-staff users are rejected with 403."""
        user = UserFactory(is_staff=False)
        self.client.login(username=user.username, password=TEST_PASSWORD)
        response = self.client.get(self.list_url)
        assert response.status_code == 403

    def test_list(self):
        """The list endpoint returns all course runs, serialized."""
        course_runs = CourseFactory.create_batch(3)
        response = self.client.get(self.list_url)
        assert response.status_code == 200
        # Order matters for the assertion
        course_runs = sorted(course_runs, key=lambda course_run: str(course_run.id))
        actual = sorted(response.data['results'], key=lambda course_run: course_run['id'])
        assert actual == CourseRunSerializer(course_runs, many=True, context=self.get_serializer_context()).data

    def test_retrieve(self):
        """The detail endpoint returns a single serialized course run."""
        course_run = CourseFactory()
        url = reverse('api:v1:course_run-detail', kwargs={'pk': str(course_run.id)})
        response = self.client.get(url)
        assert response.status_code == 200
        assert response.data == CourseRunSerializer(course_run, context=self.get_serializer_context()).data

    def test_retrieve_not_found(self):
        """Requesting a nonexistent course run returns 404."""
        url = reverse('api:v1:course_run-detail', kwargs={'pk': 'course-v1:TestX+Test101x+1T2017'})
        response = self.client.get(url)
        assert response.status_code == 404

    def test_update_not_found(self):
        """Updating a nonexistent course run returns 404."""
        url = reverse('api:v1:course_run-detail', kwargs={'pk': 'course-v1:TestX+Test101x+1T2017'})
        response = self.client.put(url, {})
        assert response.status_code == 404

    def test_update(self):
        """PUT updates the title, schedule and team of a course run."""
        course_run = CourseFactory(start=None, end=None)
        assert CourseAccessRole.objects.filter(course_id=course_run.id).count() == 0
        url = reverse('api:v1:course_run-detail', kwargs={'pk': str(course_run.id)})
        start = datetime.datetime.now(pytz.UTC).replace(microsecond=0)
        end = start + datetime.timedelta(days=30)
        title = 'A New Testing Strategy'
        user = UserFactory()
        role = 'staff'
        data = {
            'title': title,
            'schedule': {
                'start': serialize_datetime(start),
                'end': serialize_datetime(end),
            },
            'team': [
                {
                    'user': user.username,
                    'role': role,
                }
            ],
        }
        response = self.client.put(url, data, format='json')
        assert response.status_code == 200
        self.assert_access_role(course_run, user, role)
        self.assert_course_access_role_count(course_run, 1)
        # Re-fetch from the modulestore to verify the update was persisted.
        course_run = modulestore().get_course(course_run.id)
        assert response.data == CourseRunSerializer(course_run, context=self.get_serializer_context()).data
        assert course_run.display_name == title
        self.assert_course_run_schedule(course_run, start, end)

    def test_update_with_invalid_user(self):
        """An unknown team member username yields a 400 validation error."""
        course_run = CourseFactory()
        url = reverse('api:v1:course_run-detail', kwargs={'pk': str(course_run.id)})
        data = {
            'title': course_run.display_name,
            'team': [
                {
                    'user': 'test-user',
                    'role': 'staff',
                }
            ]
        }
        response = self.client.put(url, data, format='json')
        assert response.status_code == 400
        assert response.data == {'team': ['Course team user does not exist']}

    def test_update_with_pacing_type(self):
        """
        Test that update run updates the pacing type
        """
        start = datetime.datetime.now(pytz.UTC).replace(microsecond=0)
        course_run = CourseFactory(start=start, end=None, self_paced=False)
        data = {
            'pacing_type': 'self_paced',
        }
        course_run_detail_url = reverse('api:v1:course_run-detail', kwargs={'pk': str(course_run.id)})
        response = self.client.patch(course_run_detail_url, data, format='json')
        assert response.status_code == 200
        course_run = modulestore().get_course(course_run.id)
        assert course_run.self_paced is True
        self.assert_course_run_schedule(course_run, start, None)

    def test_update_with_instructor_role(self):
        """
        Test that update creates a new instructor role only if it does not exist
        """
        instructor_role = 'instructor'
        start = datetime.datetime.now(pytz.UTC).replace(microsecond=0)
        new_user = UserFactory()
        course_run = CourseFactory(start=start, end=None, self_paced=False)
        assert CourseAccessRole.objects.filter(course_id=course_run.id).count() == 0
        data = {
            'team': [
                {
                    'user': new_user.username,
                    'role': instructor_role,
                },
            ],
            'pacing_type': 'self_paced',
        }
        course_run_detail_url = reverse('api:v1:course_run-detail', kwargs={'pk': str(course_run.id)})
        response = self.client.patch(course_run_detail_url, data, format='json')
        assert response.status_code == 200
        self.assert_access_role(course_run, new_user, instructor_role)
        self.assert_course_access_role_count(course_run, 1)
        # Requesting again with the same data should not create new instructor role
        response = self.client.patch(course_run_detail_url, data, format='json')
        assert response.status_code == 200
        self.assert_access_role(course_run, new_user, instructor_role)
        self.assert_course_access_role_count(course_run, 1)

    def test_update_with_multiple_roles(self):
        """
        Test that update creates an instructor role for a user in addition to any other role/roles he already has
        """
        staff_role = 'staff'
        instructor_role = 'instructor'
        start = datetime.datetime.now(pytz.UTC).replace(microsecond=0)
        course_run = CourseFactory(start=start, end=None, self_paced=False)
        existing_user = UserFactory()
        CourseAccessRole.objects.create(
            course_id=course_run.id, org=course_run.id.org, role=staff_role, user=existing_user
        )
        # existing_user already has a staff role in the course
        # The request should create an additional instructor role for existing_user
        new_user = UserFactory()
        assert CourseAccessRole.objects.filter(course_id=course_run.id).count() == 1
        data = {
            'team': [
                {
                    'user': existing_user.username,
                    'role': instructor_role,
                },
                {
                    'user': new_user.username,
                    'role': instructor_role,
                },
            ],
        }
        course_run_detail_url = reverse('api:v1:course_run-detail', kwargs={'pk': str(course_run.id)})
        response = self.client.patch(course_run_detail_url, data, format='json')
        assert response.status_code == 200
        self.assert_access_role(course_run, existing_user, instructor_role)
        self.assert_access_role(course_run, new_user, instructor_role)
        self.assert_course_access_role_count(course_run, 3)

    @ddt.data(
        ('instructor_paced', False),
        ('self_paced', True),
    )
    @ddt.unpack
    def test_create(self, pacing_type, expected_self_paced_value):
        """Tests successful course run creation"""
        user = UserFactory()
        start = datetime.datetime.now(pytz.UTC).replace(microsecond=0)
        end = start + datetime.timedelta(days=30)
        role = 'staff'
        data = self.get_course_run_data(user, start, end, pacing_type, role)
        response = self.client.post(self.list_url, data, format='json')
        self.assertEqual(response.status_code, 201)
        course_run_key = CourseKey.from_string(response.data['id'])
        course_run = modulestore().get_course(course_run_key)
        self.assertEqual(course_run.display_name, data['title'])
        self.assertEqual(course_run.id.org, data['org'])
        self.assertEqual(course_run.id.course, data['number'])
        self.assertEqual(course_run.id.run, data['run'])
        self.assertEqual(course_run.self_paced, expected_self_paced_value)
        self.assert_course_run_schedule(course_run, start, end)
        self.assert_access_role(course_run, user, role)
        self.assert_course_access_role_count(course_run, 1)

    def test_create_with_invalid_course_team(self):
        """
        Tests that if the course team user is invalid, it returns bad request status
        with expected validation message
        """
        user = UserFactory()
        start = datetime.datetime.now(pytz.UTC).replace(microsecond=0)
        end = start + datetime.timedelta(days=30)
        data = self.get_course_run_data(user, start, end, 'self-paced')
        data['team'] = [{'user': 'invalid-username'}]
        response = self.client.post(self.list_url, data, format='json')
        self.assertEqual(response.status_code, 400)
        self.assertDictContainsSubset({'team': ['Course team user does not exist']}, response.data)

    def test_images_upload(self):
        # http://www.django-rest-framework.org/api-guide/parsers/#fileuploadparser
        course_run = CourseFactory()
        expected_filename = 'course_image.png'
        content_key = StaticContent.compute_location(course_run.id, expected_filename)
        assert course_run.course_image != expected_filename
        try:
            contentstore().find(content_key)
            self.fail('No image should be associated with a new course run.')
        except NotFoundError:
            pass
        url = reverse('api:v1:course_run-images', kwargs={'pk': str(course_run.id)})
        # PNG. Single black pixel
        content = b'\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR\x00\x00\x00\x01\x00\x00\x00\x01\x08\x02\x00\x00\x00\x90wS' \
                  b'\xde\x00\x00\x00\x0cIDATx\x9cc```\x00\x00\x00\x04\x00\x01\xf6\x178U\x00\x00\x00\x00IEND\xaeB`\x82'
        # We are intentionally passing the incorrect JPEG extension here
        upload = SimpleUploadedFile('card_image.jpg', content, content_type='image/png')
        response = self.client.post(url, {'card_image': upload}, format='multipart')
        assert response.status_code == 200
        course_run = modulestore().get_course(course_run.id)
        assert course_run.course_image == expected_filename
        expected = {'card_image': RequestFactory().get('').build_absolute_uri(course_image_url(course_run))}
        assert response.data == expected
        # There should now be an image stored
        contentstore().find(content_key)

    @override_settings(ORGANIZATIONS_AUTOCREATE=False)
    @ddt.data(
        ('instructor_paced', False, 'NotOriginalNumber1x'),
        ('self_paced', True, None),
    )
    @ddt.unpack
    def test_rerun(self, pacing_type, expected_self_paced_value, number):
        """A rerun copies the original course, with optional number override."""
        original_course_run = ToyCourseFactory()
        add_organization({
            'name': 'Test Organization',
            'short_name': original_course_run.id.org,  # lint-amnesty, pylint: disable=no-member
            'description': 'Testing Organization Description',
        })
        start = datetime.datetime.now(pytz.UTC).replace(microsecond=0)
        end = start + datetime.timedelta(days=30)
        user = UserFactory()
        role = 'instructor'
        run = '3T2017'
        url = reverse('api:v1:course_run-rerun', kwargs={'pk': str(original_course_run.id)})  # lint-amnesty, pylint: disable=no-member
        data = {
            'run': run,
            'schedule': {
                'start': serialize_datetime(start),
                'end': serialize_datetime(end),
            },
            'team': [
                {
                    'user': user.username,
                    'role': role,
                }
            ],
            'pacing_type': pacing_type,
        }
        # If number is supplied, this should become the course number used in the course run key
        # If not, it should default to the original course run number that the rerun is based on.
        if number:
            data.update({'number': number})
        response = self.client.post(url, data, format='json')
        assert response.status_code == 201
        course_run_key = CourseKey.from_string(response.data['id'])
        course_run = modulestore().get_course(course_run_key)
        assert course_run.id.run == run
        assert course_run.self_paced is expected_self_paced_value
        if number:
            assert course_run.id.course == number
            assert course_run.id.course != original_course_run.id.course  # lint-amnesty, pylint: disable=no-member
        else:
            assert course_run.id.course == original_course_run.id.course  # lint-amnesty, pylint: disable=no-member
        self.assert_course_run_schedule(course_run, start, end)
        self.assert_access_role(course_run, user, role)
        self.assert_course_access_role_count(course_run, 1)
        course_orgs = get_course_organizations(course_run_key)
        self.assertEqual(len(course_orgs), 1)
        self.assertEqual(course_orgs[0]['short_name'], original_course_run.id.org)  # lint-amnesty, pylint: disable=no-member

    def test_rerun_duplicate_run(self):
        """Rerunning into an already-existing run id is rejected with 400."""
        course_run = ToyCourseFactory()
        url = reverse('api:v1:course_run-rerun', kwargs={'pk': str(course_run.id)})
        data = {
            'run': course_run.id.run,
        }
        response = self.client.post(url, data, format='json')
        assert response.status_code == 400
        assert response.data == {'run': [f'Course run {course_run.id} already exists']}

    def test_rerun_invalid_number(self):
        """A course number with invalid key characters is rejected with 400."""
        course_run = ToyCourseFactory()
        url = reverse('api:v1:course_run-rerun', kwargs={'pk': str(course_run.id)})
        data = {
            'run': '2T2019',
            'number': '!@#$%^&*()',
        }
        response = self.client.post(url, data, format='json')
        assert response.status_code == 400
        assert response.data == {'non_field_errors': [
            'Invalid key supplied. Ensure there are no special characters in the Course Number.'
        ]}
| agpl-3.0 |
frappe/erpnext | erpnext/accounts/report/item_wise_sales_register/item_wise_sales_register.py | 2 | 18491 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe, erpnext
from frappe import _
from frappe.utils import flt, cstr
from frappe.model.meta import get_field_precision
from frappe.utils.xlsxutils import handle_html
from erpnext.accounts.report.sales_register.sales_register import get_mode_of_payments
from erpnext.selling.report.item_wise_sales_history.item_wise_sales_history import get_item_details, get_customer_details
def execute(filters=None):
	# Report entry point expected by the Frappe report framework; delegates
	# to _execute so regional variants can reuse it with extra columns.
	return _execute(filters)
def _execute(filters=None, additional_table_columns=None, additional_query_columns=None):
	"""Build the Item-wise Sales Register report.

	Args:
		filters: report filters (company, customer, date range, group_by, ...).
		additional_table_columns: extra column definitions appended to the
			standard column list (used by regional/custom variants).
		additional_query_columns: extra `tabSales Invoice` fieldnames to
			select and copy verbatim onto each data row.

	Returns:
		The Frappe script-report tuple:
		(columns, data, message, chart, report_summary, skip_total_row).
	"""
	if not filters: filters = {}
	columns = get_columns(additional_table_columns, filters)

	company_currency = frappe.get_cached_value('Company', filters.get('company'), 'default_currency')

	item_list = get_items(filters, additional_query_columns)
	if item_list:
		itemised_tax, tax_columns = get_tax_accounts(item_list, columns, company_currency)

	mode_of_payments = get_mode_of_payments(set(d.parent for d in item_list))
	so_dn_map = get_delivery_notes_against_sales_order(item_list)

	data = []
	total_row_map = {}
	skip_total_row = 0
	prev_group_by_value = ''

	if filters.get('group_by'):
		# Grand total is needed up-front so each row can be expressed as a
		# percentage of it.
		grand_total = get_grand_total(filters, 'Sales Invoice')

	customer_details = get_customer_details()
	item_details = get_item_details()

	for d in item_list:
		customer_record = customer_details.get(d.customer)
		item_record = item_details.get(d.item_code)

		# Resolve the delivery note: prefer the invoice row's own link, then
		# any DN made against the same Sales Order line, and finally the
		# invoice itself when it updated stock directly.
		delivery_note = None
		if d.delivery_note:
			delivery_note = d.delivery_note
		elif d.so_detail:
			delivery_note = ", ".join(so_dn_map.get(d.so_detail, []))

		if not delivery_note and d.update_stock:
			delivery_note = d.parent

		row = {
			'item_code': d.item_code,
			'item_name': item_record.item_name if item_record else d.item_name,
			'item_group': item_record.item_group if item_record else d.item_group,
			'description': d.description,
			'invoice': d.parent,
			'posting_date': d.posting_date,
			'customer': d.customer,
			'customer_name': customer_record.customer_name,
			'customer_group': customer_record.customer_group,
		}

		if additional_query_columns:
			for col in additional_query_columns:
				row.update({
					col: d.get(col)
				})

		row.update({
			'debit_to': d.debit_to,
			'mode_of_payment': ", ".join(mode_of_payments.get(d.parent, [])),
			'territory': d.territory,
			'project': d.project,
			'company': d.company,
			'sales_order': d.sales_order,
			'delivery_note': d.delivery_note,
			'income_account': d.unrealized_profit_loss_account or d.income_account,
			'cost_center': d.cost_center,
			'stock_qty': d.stock_qty,
			'stock_uom': d.stock_uom
		})

		# Rate is reported per stock UOM, so re-derive it when the selling
		# UOM differs from the stock UOM.
		if d.stock_uom != d.uom and d.stock_qty:
			row.update({
				'rate': (d.base_net_rate * d.qty)/d.stock_qty,
				'amount': d.base_net_amount
			})
		else:
			row.update({
				'rate': d.base_net_rate,
				'amount': d.base_net_amount
			})

		# Spread the invoice's taxes into one rate/amount column pair per
		# tax head, accumulating the row's total tax.
		total_tax = 0
		for tax in tax_columns:
			item_tax = itemised_tax.get(d.name, {}).get(tax, {})
			row.update({
				frappe.scrub(tax + ' Rate'): item_tax.get('tax_rate', 0),
				frappe.scrub(tax + ' Amount'): item_tax.get('tax_amount', 0),
			})
			total_tax += flt(item_tax.get('tax_amount'))

		row.update({
			'total_tax': total_tax,
			'total': d.base_net_amount + total_tax,
			'currency': company_currency
		})

		if filters.get('group_by'):
			row.update({'percent_gt': flt(row['total']/grand_total) * 100})
			group_by_field, subtotal_display_field = get_group_by_and_display_fields(filters)
			data, prev_group_by_value = add_total_row(data, filters, prev_group_by_value, d, total_row_map,
				group_by_field, subtotal_display_field, grand_total, tax_columns)
			add_sub_total_row(row, total_row_map, d.get(group_by_field, ''), tax_columns)

		data.append(row)

	if filters.get('group_by') and item_list:
		# Flush the subtotal of the final group, then append the report-wide
		# total row and suppress the datatable's automatic total.
		total_row = total_row_map.get(prev_group_by_value or d.get('item_name'))
		total_row['percent_gt'] = flt(total_row['total']/grand_total * 100)
		data.append(total_row)
		data.append({})
		add_sub_total_row(total_row, total_row_map, 'total_row', tax_columns)
		data.append(total_row_map.get('total_row'))
		skip_total_row = 1

	return columns, data, None, None, None, skip_total_row
def get_columns(additional_table_columns, filters):
	"""Assemble the report's column definitions.

	Columns that duplicate the active group-by dimension (Item, Item Group,
	Customer, Customer Group, Territory) are omitted, since that value is
	already shown on the subtotal rows. Tax columns are appended later by
	get_tax_accounts once the relevant tax heads are known.
	"""
	columns = []

	if filters.get('group_by') != ('Item'):
		columns.extend(
			[
				{
					'label': _('Item Code'),
					'fieldname': 'item_code',
					'fieldtype': 'Link',
					'options': 'Item',
					'width': 120
				},
				{
					'label': _('Item Name'),
					'fieldname': 'item_name',
					'fieldtype': 'Data',
					'width': 120
				}
			]
		)

	if filters.get('group_by') not in ('Item', 'Item Group'):
		columns.extend([
			{
				'label': _('Item Group'),
				'fieldname': 'item_group',
				'fieldtype': 'Link',
				'options': 'Item Group',
				'width': 120
			}
		])

	columns.extend([
		{
			'label': _('Description'),
			'fieldname': 'description',
			'fieldtype': 'Data',
			'width': 150
		},
		{
			'label': _('Invoice'),
			'fieldname': 'invoice',
			'fieldtype': 'Link',
			'options': 'Sales Invoice',
			'width': 120
		},
		{
			'label': _('Posting Date'),
			'fieldname': 'posting_date',
			'fieldtype': 'Date',
			'width': 120
		}
	])

	if filters.get('group_by') != 'Customer':
		columns.extend([
			{
				'label': _('Customer Group'),
				'fieldname': 'customer_group',
				'fieldtype': 'Link',
				'options': 'Customer Group',
				'width': 120
			}
		])

	if filters.get('group_by') not in ('Customer', 'Customer Group'):
		columns.extend([
			{
				'label': _('Customer'),
				'fieldname': 'customer',
				'fieldtype': 'Link',
				'options': 'Customer',
				'width': 120
			},
			{
				'label': _('Customer Name'),
				'fieldname': 'customer_name',
				'fieldtype': 'Data',
				'width': 120
			}
		])

	# Caller-supplied extra columns slot in before the accounting columns.
	if additional_table_columns:
		columns += additional_table_columns

	columns += [
		{
			'label': _('Receivable Account'),
			'fieldname': 'debit_to',
			'fieldtype': 'Link',
			'options': 'Account',
			'width': 80
		},
		{
			'label': _('Mode Of Payment'),
			'fieldname': 'mode_of_payment',
			'fieldtype': 'Data',
			'width': 120
		}
	]

	if filters.get('group_by') != 'Territory':
		columns.extend([
			{
				'label': _('Territory'),
				'fieldname': 'territory',
				'fieldtype': 'Link',
				'options': 'Territory',
				'width': 80
			}
		])

	columns += [
		{
			'label': _('Project'),
			'fieldname': 'project',
			'fieldtype': 'Link',
			'options': 'Project',
			'width': 80
		},
		{
			'label': _('Company'),
			'fieldname': 'company',
			'fieldtype': 'Link',
			'options': 'Company',
			'width': 80
		},
		{
			'label': _('Sales Order'),
			'fieldname': 'sales_order',
			'fieldtype': 'Link',
			'options': 'Sales Order',
			'width': 100
		},
		{
			'label': _("Delivery Note"),
			'fieldname': 'delivery_note',
			'fieldtype': 'Link',
			'options': 'Delivery Note',
			'width': 100
		},
		{
			'label': _('Income Account'),
			'fieldname': 'income_account',
			'fieldtype': 'Link',
			'options': 'Account',
			'width': 100
		},
		{
			'label': _("Cost Center"),
			'fieldname': 'cost_center',
			'fieldtype': 'Link',
			'options': 'Cost Center',
			'width': 100
		},
		{
			'label': _('Stock Qty'),
			'fieldname': 'stock_qty',
			'fieldtype': 'Float',
			'width': 100
		},
		{
			'label': _('Stock UOM'),
			'fieldname': 'stock_uom',
			'fieldtype': 'Link',
			'options': 'UOM',
			'width': 100
		},
		{
			'label': _('Rate'),
			'fieldname': 'rate',
			'fieldtype': 'Float',
			'options': 'currency',
			'width': 100
		},
		{
			'label': _('Amount'),
			'fieldname': 'amount',
			'fieldtype': 'Currency',
			'options': 'currency',
			'width': 100
		}
	]

	# Percentage-of-grand-total only makes sense when subtotals are shown.
	if filters.get('group_by'):
		columns.append({
			'label': _('% Of Grand Total'),
			'fieldname': 'percent_gt',
			'fieldtype': 'Float',
			'width': 80
		})

	return columns
def get_conditions(filters):
	"""Build the SQL WHERE fragment (plus trailing ORDER BY) for get_items.

	Only fixed, trusted SQL is concatenated here; filter values are bound
	later by frappe.db.sql via the %(key)s placeholders.
	"""
	conditions = ""

	# Simple equality/range filters: append a clause only when the
	# corresponding filter value is set.
	for opts in (("company", " and company=%(company)s"),
		("customer", " and `tabSales Invoice`.customer = %(customer)s"),
		("item_code", " and `tabSales Invoice Item`.item_code = %(item_code)s"),
		("from_date", " and `tabSales Invoice`.posting_date>=%(from_date)s"),
		("to_date", " and `tabSales Invoice`.posting_date<=%(to_date)s")):
			if filters.get(opts[0]):
				conditions += opts[1]

	if filters.get("mode_of_payment"):
		conditions += """ and exists(select name from `tabSales Invoice Payment`
			where parent=`tabSales Invoice`.name
				and ifnull(`tabSales Invoice Payment`.mode_of_payment, '') = %(mode_of_payment)s)"""

	# NOTE(review): the fragments below and the ORDER BY clause are appended
	# without a leading space; the result parses today because ')' and quoted
	# values delimit tokens, but this is fragile — verify when adding filters.
	if filters.get("warehouse"):
		conditions += """and ifnull(`tabSales Invoice Item`.warehouse, '') = %(warehouse)s"""

	if filters.get("brand"):
		conditions += """and ifnull(`tabSales Invoice Item`.brand, '') = %(brand)s"""

	if filters.get("item_group"):
		conditions += """and ifnull(`tabSales Invoice Item`.item_group, '') = %(item_group)s"""

	if not filters.get("group_by"):
		conditions += "ORDER BY `tabSales Invoice`.posting_date desc, `tabSales Invoice Item`.item_group desc"
	else:
		conditions += get_group_by_conditions(filters, 'Sales Invoice')

	return conditions
def get_group_by_conditions(filters, doctype):
	"""Return the ORDER BY clause matching the selected group-by option.

	Rows must arrive sorted by the grouping field so that subtotal rows can
	be emitted whenever the group value changes. Returns None for an
	unrecognised group-by value.
	"""
	group_by = filters.get("group_by")
	if group_by == 'Invoice':
		return "ORDER BY `tab{0} Item`.parent desc".format(doctype)
	if group_by == 'Item':
		return "ORDER BY `tab{0} Item`.`item_code`".format(doctype)
	if group_by == 'Item Group':
		# Grouping field lives on the child (item) table.
		return "ORDER BY `tab{0} Item`.{1}".format(doctype, frappe.scrub(group_by))
	if group_by in ('Customer', 'Customer Group', 'Territory', 'Supplier'):
		# Grouping field lives on the parent (invoice) table.
		return "ORDER BY `tab{0}`.{1}".format(doctype, frappe.scrub(group_by))
def get_items(filters, additional_query_columns):
	"""Fetch submitted Sales Invoice Item rows joined with their parent
	invoice, filtered per the report filters.

	additional_query_columns, when given, is a list of extra fieldnames
	appended verbatim to the SELECT list (callers control this list, not
	end users — hence the #nosec below).
	"""
	conditions = get_conditions(filters)

	if additional_query_columns:
		additional_query_columns = ', ' + ', '.join(additional_query_columns)
	else:
		additional_query_columns = ''

	return frappe.db.sql("""
		select
			`tabSales Invoice Item`.name, `tabSales Invoice Item`.parent,
			`tabSales Invoice`.posting_date, `tabSales Invoice`.debit_to,
			`tabSales Invoice`.unrealized_profit_loss_account,
			`tabSales Invoice`.project, `tabSales Invoice`.customer, `tabSales Invoice`.remarks,
			`tabSales Invoice`.territory, `tabSales Invoice`.company, `tabSales Invoice`.base_net_total,
			`tabSales Invoice Item`.item_code, `tabSales Invoice Item`.description,
			`tabSales Invoice Item`.`item_name`, `tabSales Invoice Item`.`item_group`,
			`tabSales Invoice Item`.sales_order, `tabSales Invoice Item`.delivery_note,
			`tabSales Invoice Item`.income_account, `tabSales Invoice Item`.cost_center,
			`tabSales Invoice Item`.stock_qty, `tabSales Invoice Item`.stock_uom,
			`tabSales Invoice Item`.base_net_rate, `tabSales Invoice Item`.base_net_amount,
			`tabSales Invoice`.customer_name, `tabSales Invoice`.customer_group, `tabSales Invoice Item`.so_detail,
			`tabSales Invoice`.update_stock, `tabSales Invoice Item`.uom, `tabSales Invoice Item`.qty {0}
		from `tabSales Invoice`, `tabSales Invoice Item`
		where `tabSales Invoice`.name = `tabSales Invoice Item`.parent
			and `tabSales Invoice`.docstatus = 1 {1}
		""".format(additional_query_columns or '', conditions), filters, as_dict=1) #nosec
def get_delivery_notes_against_sales_order(item_list):
	"""Map Sales Order Item name (so_detail) -> list of Delivery Note names.

	Lets the report show which delivery notes fulfilled the sales-order line
	each invoice item was billed against.
	"""
	so_dn_map = frappe._dict()
	# Deduplicate so_detail values; may include None for rows without a
	# sales-order link, which simply matches nothing in the query below.
	so_item_rows = list(set([d.so_detail for d in item_list]))

	if so_item_rows:
		delivery_notes = frappe.db.sql("""
			select parent, so_detail
			from `tabDelivery Note Item`
			where docstatus=1 and so_detail in (%s)
			group by so_detail, parent
		""" % (', '.join(['%s']*len(so_item_rows))), tuple(so_item_rows), as_dict=1)

		for dn in delivery_notes:
			so_dn_map.setdefault(dn.so_detail, []).append(dn.parent)

	return so_dn_map
def get_grand_total(filters, doctype):
	"""Return the summed base_grand_total of all submitted documents of
	`doctype` in the filtered posting-date range.

	Used as the denominator for the '% Of Grand Total' column.
	"""
	return frappe.db.sql(""" SELECT
		SUM(`tab{0}`.base_grand_total)
		FROM `tab{0}`
		WHERE `tab{0}`.docstatus = 1
		and posting_date between %s and %s
	""".format(doctype), (filters.get('from_date'), filters.get('to_date')))[0][0] #nosec
def get_deducted_taxes():
	# Names of purchase tax rows flagged as deductions; get_tax_accounts
	# negates their amounts when building the purchase variant of the report.
	return frappe.db.sql_list("select name from `tabPurchase Taxes and Charges` where add_deduct_tax = 'Deduct'")
def get_tax_accounts(item_list, columns, company_currency,
		doctype='Sales Invoice', tax_doctype='Sales Taxes and Charges'):
	"""Apportion each invoice's tax rows to its items and append one
	Rate/Amount column pair per tax head to `columns`.

	Returns:
		(itemised_tax, tax_columns) where itemised_tax maps an invoice-item
		row name to {tax description: {'tax_rate', 'tax_amount'}} and
		tax_columns is the sorted list of tax-head descriptions.
	"""
	import json
	item_row_map = {}
	tax_columns = []
	invoice_item_row = {}
	itemised_tax = {}

	tax_amount_precision = get_field_precision(frappe.get_meta(tax_doctype).get_field('tax_amount'),
		currency=company_currency) or 2

	# Index the item rows by invoice and by (invoice, item_code) so tax
	# amounts can be distributed proportionally to base_net_amount below.
	for d in item_list:
		invoice_item_row.setdefault(d.parent, []).append(d)
		item_row_map.setdefault(d.parent, {}).setdefault(d.item_code or d.item_name, []).append(d)

	conditions = ""
	if doctype == "Purchase Invoice":
		conditions = " and category in ('Total', 'Valuation and Total') and base_tax_amount_after_discount_amount != 0"

	deducted_tax = get_deducted_taxes()
	tax_details = frappe.db.sql("""
		select
			name, parent, description, item_wise_tax_detail,
			charge_type, base_tax_amount_after_discount_amount
		from `tab%s`
		where
			parenttype = %s and docstatus = 1
			and (description is not null and description != '')
			and parent in (%s)
			%s
		order by description
	""" % (tax_doctype, '%s', ', '.join(['%s']*len(invoice_item_row)), conditions),
		tuple([doctype] + list(invoice_item_row)))

	for name, parent, description, item_wise_tax_detail, charge_type, tax_amount in tax_details:
		description = handle_html(description)
		if description not in tax_columns and tax_amount:
			# as description is text editor earlier and markup can break the column convention in reports
			tax_columns.append(description)

		if item_wise_tax_detail:
			try:
				item_wise_tax_detail = json.loads(item_wise_tax_detail)

				for item_code, tax_data in item_wise_tax_detail.items():
					itemised_tax.setdefault(item_code, frappe._dict())

					# tax_data may be [rate, amount] or a bare rate.
					if isinstance(tax_data, list):
						tax_rate, tax_amount = tax_data
					else:
						tax_rate = tax_data
						tax_amount = 0

					if charge_type == 'Actual' and not tax_rate:
						tax_rate = 'NA'

					item_net_amount = sum([flt(d.base_net_amount)
						for d in item_row_map.get(parent, {}).get(item_code, [])])

					# Split the item-level tax across the invoice rows of
					# this item in proportion to their net amounts.
					for d in item_row_map.get(parent, {}).get(item_code, []):
						item_tax_amount = flt((tax_amount * d.base_net_amount) / item_net_amount) \
							if item_net_amount else 0
						if item_tax_amount:
							tax_value = flt(item_tax_amount, tax_amount_precision)
							# Deducted purchase taxes are shown as negatives.
							tax_value = (tax_value * -1
								if (doctype == 'Purchase Invoice' and name in deducted_tax) else tax_value)

							itemised_tax.setdefault(d.name, {})[description] = frappe._dict({
								'tax_rate': tax_rate,
								'tax_amount': tax_value
							})

			except ValueError:
				# Malformed item_wise_tax_detail JSON — skip this tax row.
				continue
		elif charge_type == 'Actual' and tax_amount:
			# No per-item breakup available: spread the actual amount over
			# all rows of the invoice by net-amount share.
			for d in invoice_item_row.get(parent, []):
				itemised_tax.setdefault(d.name, {})[description] = frappe._dict({
					'tax_rate': 'NA',
					'tax_amount': flt((tax_amount * d.base_net_amount) / d.base_net_total,
						tax_amount_precision)
				})

	tax_columns.sort()
	for desc in tax_columns:
		columns.append({
			'label': _(desc + ' Rate'),
			'fieldname': frappe.scrub(desc + ' Rate'),
			'fieldtype': 'Float',
			'width': 100
		})
		columns.append({
			'label': _(desc + ' Amount'),
			'fieldname': frappe.scrub(desc + ' Amount'),
			'fieldtype': 'Currency',
			'options': 'currency',
			'width': 100
		})

	columns += [
		{
			'label': _('Total Tax'),
			'fieldname': 'total_tax',
			'fieldtype': 'Currency',
			'options': 'currency',
			'width': 100
		},
		{
			'label': _('Total'),
			'fieldname': 'total',
			'fieldtype': 'Currency',
			'options': 'currency',
			'width': 100
		},
		{
			'fieldname': 'currency',
			'label': _('Currency'),
			'fieldtype': 'Currency',
			'width': 80,
			'hidden': 1
		}
	]

	return itemised_tax, tax_columns
def add_total_row(data, filters, prev_group_by_value, item, total_row_map,
	group_by_field, subtotal_display_field, grand_total, tax_columns):
	"""On a change of group-by value, flush the previous group's subtotal
	row into `data` and seed zeroed accumulator rows for the new group (and,
	once, for the report-wide total row).

	Returns the (possibly extended) data list and the new group-by value.
	"""
	if prev_group_by_value != item.get(group_by_field, ''):
		if prev_group_by_value:
			total_row = total_row_map.get(prev_group_by_value)
			data.append(total_row)
			data.append({})
			# Fold the finished subtotal into the report-wide total row.
			add_sub_total_row(total_row, total_row_map, 'total_row', tax_columns)

		prev_group_by_value = item.get(group_by_field, '')

		total_row_map.setdefault(item.get(group_by_field, ''), {
			subtotal_display_field: get_display_value(filters, group_by_field, item),
			'stock_qty': 0.0,
			'amount': 0.0,
			'bold': 1,
			'total_tax': 0.0,
			'total': 0.0,
			'percent_gt': 0.0
		})

		total_row_map.setdefault('total_row', {
			subtotal_display_field: 'Total',
			'stock_qty': 0.0,
			'amount': 0.0,
			'bold': 1,
			'total_tax': 0.0,
			'total': 0.0,
			'percent_gt': 0.0
		})

	return data, prev_group_by_value
def get_display_value(filters, group_by_field, item):
	"""Build the label shown on a group's subtotal row.

	For Item and party groupings, the code/id is shown with the human name
	underneath in a lighter span; otherwise the raw grouping value is used.
	"""
	group_by = filters.get('group_by')
	if group_by == 'Item':
		code = item.get('item_code')
		name = item.get('item_name')
		if code != name:
			return cstr(code) + "<br><br>" + \
				"<span style='font-weight: normal'>" + cstr(name) + "</span>"
		return item.get('item_code', '')
	if group_by in ('Customer', 'Supplier'):
		party = frappe.scrub(group_by)
		party_id = item.get(party)
		party_name = item.get(party + '_name')
		if party_id != party_name:
			return party_id + "<br><br>" + \
				"<span style='font-weight: normal'>" + party_name + "</span>"
		return party_id
	return item.get(group_by_field)
def get_group_by_and_display_fields(filters):
	"""Resolve the row field used for grouping and the field on which the
	subtotal label is displayed.

	Returns:
		(group_by_field, subtotal_display_field)
	"""
	group_by = filters.get('group_by')
	if group_by == 'Item':
		return 'item_code', 'invoice'
	if group_by == 'Invoice':
		return 'parent', 'item_code'
	# All other groupings use the scrubbed fieldname of the chosen dimension.
	return frappe.scrub(group_by), 'item_code'
def add_sub_total_row(item, total_row_map, group_by_value, tax_columns):
	"""Accumulate one data row into the running subtotal for its group.

	Mutates total_row_map[group_by_value] in place; per-tax amount columns
	are created lazily the first time a tax head appears.
	"""
	total_row = total_row_map.get(group_by_value)
	for field in ('stock_qty', 'amount', 'total_tax', 'total', 'percent_gt'):
		total_row[field] += item[field]
	for tax in tax_columns:
		tax_field = frappe.scrub(tax + ' Amount')
		total_row.setdefault(tax_field, 0.0)
		total_row[tax_field] += flt(item[tax_field])
| gpl-3.0 |
sinkuri256/python-for-android | python3-alpha/extra_modules/atom/mock_http.py | 48 | 4452 | #!/usr/bin/python
#
# Copyright (C) 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__author__ = 'api.jscudder (Jeff Scudder)'
import atom.http_interface
import atom.url
class Error(Exception):
  """Base exception for errors raised by this mock HTTP module."""
  pass
class NoRecordingFound(Error):
  """Raised by MockHttpClient.request when no recorded response matches."""
  pass
class MockRequest(object):
  """Stores the parameters of an HTTP request so that later requests can be
  matched against it.
  """

  def __init__(self, operation, url, data=None, headers=None):
    """Capture the request parameters.

    A string URL is parsed into a structured object via atom.url.parse_url
    so matching compares URL components rather than raw text; any other url
    value is stored untouched.
    """
    self.operation = operation
    self.url = atom.url.parse_url(url) if isinstance(url, str) else url
    self.data = data
    self.headers = headers
class MockResponse(atom.http_interface.HttpResponse):
  """Stand-in for an httplib.HTTPResponse built from canned data."""

  def __init__(self, body=None, status=None, reason=None, headers=None):
    """Store the canned response.

    A file-like body is drained immediately, so read() can be called any
    number of times on the stored string. A truthy status is coerced to int
    to mirror httplib; None is preserved.
    """
    if body and hasattr(body, 'read'):
      body = body.read()
    self.body = body
    self.status = int(status) if status is not None else None
    self.reason = reason
    self._headers = headers or {}

  def read(self):
    """Return the stored response body, mimicking the httplib API."""
    return self.body
class MockHttpClient(atom.http_interface.GenericHttpClient):
  def __init__(self, headers=None, recordings=None, real_client=None):
    """An HttpClient which responds to request with stored data.

    The request-response pairs are stored as tuples in a member list named
    recordings.

    The MockHttpClient can be switched from replay mode to record mode by
    setting the real_client member to an instance of an HttpClient which will
    make real HTTP requests and store the server's response in list of
    recordings.

    Args:
      headers: dict containing HTTP headers which should be included in all
          HTTP requests.
      recordings: The initial recordings to be used for responses. This list
          contains tuples in the form: (MockRequest, MockResponse)
      real_client: An HttpClient which will make a real HTTP request. The
          response will be converted into a MockResponse and stored in
          recordings.
    """
    self.recordings = recordings or []
    self.real_client = real_client
    self.headers = headers or {}

  def add_response(self, response, operation, url, data=None, headers=None):
    """Adds a request-response pair to the recordings list.

    After the recording is added, future matching requests will receive the
    response.

    Args:
      response: MockResponse
      operation: str
      url: str
      data: str, Currently the data is ignored when looking for matching
          requests.
      headers: dict of strings: Currently the headers are ignored when
          looking for matching requests.
    """
    request = MockRequest(operation, url, data=data, headers=headers)
    self.recordings.append((request, response))

  def request(self, operation, url, data=None, headers=None):
    """Returns a matching MockResponse from the recordings.

    If the real_client is set, the request will be passed along and the
    server's response will be added to the recordings and also returned.

    If there is no match, a NoRecordingFound error will be raised.
    """
    if self.real_client is None:
      if isinstance(url, str):
        url = atom.url.parse_url(url)
      # Matching considers only operation and url; data/headers are ignored
      # (see add_response docstring).
      for recording in self.recordings:
        if recording[0].operation == operation and recording[0].url == url:
          return recording[1]
      # Bug fix: error message previously read 'No recodings found'.
      raise NoRecordingFound('No recordings found for %s %s' % (
          operation, url))
    else:
      # There is a real HTTP client, so make the request, and record the
      # response.
      response = self.real_client.request(operation, url, data=data,
          headers=headers)
      # TODO: copy the headers
      stored_response = MockResponse(body=response, status=response.status,
          reason=response.reason)
      self.add_response(stored_response, operation, url, data=data,
          headers=headers)
      return stored_response
| apache-2.0 |
equialgo/scikit-learn | examples/cluster/plot_color_quantization.py | 61 | 3444 | # -*- coding: utf-8 -*-
"""
==================================
Color Quantization using K-Means
==================================
Performs a pixel-wise Vector Quantization (VQ) of an image of the summer palace
(China), reducing the number of colors required to show the image from 96,615
unique colors to 64, while preserving the overall appearance quality.
In this example, pixels are represented in a 3D-space and K-means is used to
find 64 color clusters. In the image processing literature, the codebook
obtained from K-means (the cluster centers) is called the color palette. Using
a single byte, up to 256 colors can be addressed, whereas an RGB encoding
requires 3 bytes per pixel. The GIF file format, for example, uses such a
palette.
For comparison, a quantized image using a random codebook (colors picked up
randomly) is also shown.
"""
# Authors: Robert Layton <robertlayton@gmail.com>
# Olivier Grisel <olivier.grisel@ensta.org>
# Mathieu Blondel <mathieu@mblondel.org>
#
# License: BSD 3 clause
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans
from sklearn.metrics import pairwise_distances_argmin
from sklearn.datasets import load_sample_image
from sklearn.utils import shuffle
from time import time
# Size of the color palette (number of K-means clusters).
n_colors = 64
# Load the Summer Palace photo
china = load_sample_image("china.jpg")
# Convert to floats instead of the default 8 bits integer coding. Dividing by
# 255 is important so that plt.imshow behaves works well on float data (need to
# be in the range [0-1])
china = np.array(china, dtype=np.float64) / 255
# Load Image and transform to a 2D numpy array.
w, h, d = original_shape = tuple(china.shape)
assert d == 3
image_array = np.reshape(china, (w * h, d))
print("Fitting model on a small sub-sample of the data")
t0 = time()
# Fit on 1000 randomly-chosen pixels only; predicting on the full image below
# is cheap once the cluster centers are known.
image_array_sample = shuffle(image_array, random_state=0)[:1000]
kmeans = KMeans(n_clusters=n_colors, random_state=0).fit(image_array_sample)
print("done in %0.3fs." % (time() - t0))
# Get labels for all points
print("Predicting color indices on the full image (k-means)")
t0 = time()
labels = kmeans.predict(image_array)
print("done in %0.3fs." % (time() - t0))
# Baseline: a codebook of randomly picked pixel colors, for comparison.
codebook_random = shuffle(image_array, random_state=0)[:n_colors + 1]
print("Predicting color indices on the full image (random)")
t0 = time()
# Assign every pixel to its nearest random-codebook color.
labels_random = pairwise_distances_argmin(codebook_random,
                                          image_array,
                                          axis=0)
print("done in %0.3fs." % (time() - t0))
def recreate_image(codebook, labels, w, h):
    """Recreate the (compressed) image from the code book & labels.

    Parameters
    ----------
    codebook : ndarray of shape (n_colors, d)
        The palette: one row per color.
    labels : ndarray of shape (w * h,)
        Row-major palette index for each pixel.
    w, h : int
        Output image width and height (first two axes).

    Returns
    -------
    ndarray of shape (w, h, d)
    """
    d = codebook.shape[1]
    # Fancy indexing replaces the original per-pixel Python loop:
    # codebook[labels] builds the (w*h, d) pixel matrix in one vectorized
    # step, which we then fold back into the image shape.
    return codebook[labels].reshape(w, h, d)
# Display all results, alongside original image
plt.figure(1)
plt.clf()
ax = plt.axes([0, 0, 1, 1])
plt.axis('off')
plt.title('Original image (96,615 colors)')
plt.imshow(china)
# Figure 2: the K-means-quantized image (64 learned palette colors).
plt.figure(2)
plt.clf()
ax = plt.axes([0, 0, 1, 1])
plt.axis('off')
plt.title('Quantized image (64 colors, K-Means)')
plt.imshow(recreate_image(kmeans.cluster_centers_, labels, w, h))
# Figure 3: the random-codebook baseline for visual comparison.
plt.figure(3)
plt.clf()
ax = plt.axes([0, 0, 1, 1])
plt.axis('off')
plt.title('Quantized image (64 colors, Random)')
plt.imshow(recreate_image(codebook_random, labels_random, w, h))
plt.show()
| bsd-3-clause |
andreaazzara/pyot | contiki-tres/cpu/cc2430/bank-alloc.py | 9 | 7895 | #!/usr/bin/env python
# Copyright (c) 2010, Loughborough University - Computer Science
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the Institute nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE INSTITUTE AND CONTRIBUTORS ``AS IS'' AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE INSTITUTE OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
#
# This file is part of the Contiki operating system.
# \file
# Automatic allocation of modules to code segments for bankable builds
# with SDCC's huge memory model.
#
# \author
# George Oikonomou - <oikonomou@users.sourceforge.net>
import sys
import re
import operator
import fileinput
import os
# Open a module object file (.rel) and read it's code size
def retrieve_module_size(file_name):
    """Return the code size (bytes) declared in a .rel object file.

    Scans for an SDCC area line of the form ``A HOME size <hex>`` or
    ``A BANK<n> size <hex>`` and returns the hex size as an int.
    Returns 0 when no such line is present.
    """
    pattern = re.compile('^A\s+(?:HOME|BANK[0-9])\s+size\s+([1-9A-F][0-9A-F]*)')
    with open(file_name) as rel_file:
        for line in rel_file:
            match = pattern.search(line)
            if match is None:
                continue
            return int(match.group(1), 16)
    return 0
# Searches for a code segment rule for file_name in the segment_rules file
# If there is a rule, we respect it. Otherwise, we can move the file around
def get_source_seg(source_file, object_file, segment_rules):
    """Return the code segment for source_file per the segment_rules file.

    Each line of segment_rules is "<SEGMENT> <regex>". The first rule whose
    regex matches source_file wins; the chosen segment is also persisted to
    <object_file basename>.seg so later build steps can honour it.
    Returns None when no rule matches.
    """
    for line in open(segment_rules):
        tokens = line.split(None)
        match = re.search(tokens[1], source_file)
        if match is not None:
            # Save it in basename.seg.
            # Bug fix: the original called ``of.close`` without parentheses,
            # so the handle was never explicitly closed/flushed; a context
            # manager guarantees it.
            base, ext = os.path.splitext(object_file)
            with open(base + '.seg', 'w') as of:
                of.write(tokens[0] + '\n')
            return tokens[0]
    return None
# If segment.rules specified a rule for a source file, the respective object
# file's banking requirement will be stored in object_file.seg
def get_object_seg(object_file):
    """Return the bank recorded in <object_file basename>.seg, or None.

    A .seg file exists only when segment.rules pinned the corresponding
    source file to a specific segment (see get_source_seg).
    """
    base, _ = os.path.splitext(object_file)
    seg_file = base + '.seg'
    if not os.path.isfile(seg_file):
        return None
    with open(seg_file, 'r') as handle:
        return handle.readline().strip()
# Open project.mem and retreive the project's total code footprint
def get_total_size(project):
    """Read <project>.mem and return the total FLASH footprint in bytes.

    Looks for the linker summary line of the form
    ``FLASH 0x.... 0x.... <bytes>`` and returns <bytes> as an int
    (None when the line is absent).
    """
    flash_pat = re.compile('FLASH\s+(0x[0-9a-f]+\s+){2}([0-9]+)')
    with open(project + '.mem') as mem_file:
        for line in mem_file:
            match = flash_pat.search(line)
            if match:
                return int(match.group(2))
    return None
# Open project.map and retrieve the list of modules linked in
# This will only consider contiki sources, not SDCC libraries
# NB: Sometimes object filenames get truncated:
# contiki-sensinode.lib [ obj_sensinode/watchdog-cc2430.re ]
# See how for this file the 'l' in 'rel' is missing. For that reason, we retrieve
# the filaname until the last '.' but without the extension and we append 'rel'
# As long as the filename doesn't get truncated, we're good
def populate(project, modules, segment_rules, bins):
    """Parse <project>.map and distribute linked modules for allocation.

    Modules pinned to a segment via segment.rules (i.e. having a .seg file)
    are accounted directly into *bins*; freely allocatable modules are
    appended to *modules* as [filename, size, "NONE"] entries.
    Returns (bankable_total, user_total) byte counts.
    """
    bankable_total = 0
    user_total = 0
    map_file = project + '.map'
    # Match the object path up to its last '.'; 'rel' is re-appended below
    # because the map file sometimes truncates the extension (see the
    # comment block above this function).
    file_pat = re.compile('obj_sensinode[^ ]+\.')
    for line in open(map_file):
        file_name = file_pat.search(line)
        if file_name is not None:
            mod = file_name.group(0) + 'rel'
            code_size = retrieve_module_size(mod)
            seg = get_object_seg(mod)
            if seg is not None:
                # This module has been assigned to a bank by the user
                #print 'In', seg, file_name.group(0), 'size', code_size
                bins[seg][0] += code_size
                user_total += code_size
            else:
                # We are free to allocate this module
                modules.append([mod, code_size, "NONE"])
                bankable_total += code_size
    return bankable_total, user_total
# Allocate bankable modules to banks according to a simple
# 'first fit, decreasing' bin packing heuristic.
def bin_pack(modules, bins, offset, log):
    """Allocate *modules* to *bins* with a first-fit-decreasing heuristic.

    offset==1 reserves 4096 bytes of the HOME segment (presumably for a
    bootloader/offset region — TODO confirm). One line per allocation is
    written to *log*. Returns 0 on success, 1 if some module fits nowhere.
    """
    if offset==1:
        bins['HOME'][1] -= 4096
    # Sort by size, descending, in=place
    modules.sort(key=operator.itemgetter(1), reverse=True)
    for module in modules:
        # We want to iterate in a specific order and dict.keys() won't do that
        # (HOME is deliberately tried last, as the fallback bin).
        for bin_id in ['BANK1', 'BANK2', 'BANK3', 'HOME']:
            if bins[bin_id][0] + module[1] < bins[bin_id][1]:
                bins[bin_id][0] += module[1]
                module[2] = bin_id
                log.writelines(' '.join([module[2].ljust(8), \
                    str(module[1]).rjust(5), module[0], '\n']))
                break
            else:
                # Even HOME (the last bin tried) cannot hold it: fatal.
                if bin_id == 'HOME':
                    print "Failed to allocate", module[0], "with size", module[1], \
                        "to a code bank. This is fatal"
                    return 1
    return 0
# Hack the new bank directly in the .rel file
def relocate(module, bank):
    """Rewrite the SDCC area line of a .rel file in place to target *bank*.

    Uses fileinput's inplace mode: everything written to stdout inside the
    loop replaces the file's contents.
    """
    area_pat = re.compile('(A\s+)(?:HOME|BANK[0-9])(\s+size\s+[1-9A-F][0-9A-F]*.+\n)')
    for line in fileinput.input(module, inplace=1):
        match = area_pat.search(line)
        if match is None:
            sys.stdout.write(line)
        else:
            sys.stdout.write(match.group(1) + bank + match.group(2))
    return
# ---- Entry point (Python 2 script): two modes, selected by argv[1]'s
# extension. A '.c' argument means "print the segment for this source file";
# anything else means "bin-pack the whole project's modules into banks".
if len(sys.argv) < 3:
    print 'Usage:'
    print 'bank-alloc.py project path_to_segment_rules [offset]'
    print 'bank-alloc.py source_file path_to_segment_rules object_file'
    sys.exit(1)
modules = list()
file_name = sys.argv[1]
segment_rules = sys.argv[2]
# Magic: Guess whether we want to determine the code bank for a code file
# or whether we want to bin-pack
basename, ext = os.path.splitext(file_name)
if ext == '.c':
    # Code Segment determination
    if len(sys.argv) < 4:
        print 'Usage:'
        print 'bank-alloc.py project path_to_segment_rules [offset]'
        print 'bank-alloc.py source_file path_to_segment_rules object_file'
        sys.exit(1)
    object_file = sys.argv[3]
    seg = get_source_seg(file_name, object_file, segment_rules)
    if seg is None:
        # No rule matched: BANK1 is the default segment.
        print "BANK1"
    else:
        print seg
    exit()
# Bin-Packing
offset = 0
if len(sys.argv) > 3 and sys.argv[3] is not None:
    offset = int(sys.argv[3])
sizes = {'total': 0, 'bankable': 0, 'user': 0, 'libs': 0}
# Name : [Allocated, capacity]
bins = {
    'HOME': [0, 32768],
    'BANK1': [0, 32768],
    'BANK2': [0, 32768],
    'BANK3': [0, 30720]
}
sizes['total'] = get_total_size(basename)
sizes['bankable'], sizes['user'] = populate(basename, modules, segment_rules, bins)
# Whatever is neither bankable nor user-pinned must be const data + libraries;
# that remainder always lives in HOME.
sizes['libs'] = sizes['total'] - sizes['bankable'] - sizes['user']
print 'Total Size =', sizes['total'], 'bytes (' + \
    str(sizes['bankable']), 'bankable,', \
    str(sizes['user']), 'user-allocated,', \
    str(sizes['libs']), 'const+libs)'
bins['HOME'][0] += sizes['libs']
print 'Preallocations: HOME=' + str(bins['HOME'][0]) + \
    ', BANK1=' + str(bins['BANK1'][0]) + ', BANK2=' + str(bins['BANK2'][0]) + \
    ', BANK3=' + str(bins['BANK3'][0])
# Open a log file
of = open(basename + '.banks', 'w')
pack = bin_pack(modules, bins, offset, of)
of.close()
print "Bin-Packing results (target allocation):"
print "Segment - max - alloc"
for bin_id in ['HOME', 'BANK1', 'BANK2', 'BANK3']:
    print bin_id.rjust(7), str(bins[bin_id][1]).rjust(6), str(bins[bin_id][0]).rjust(6)
if pack > 0:
    sys.exit(1)
# If we reach here we seem to have a sane allocation. Start changing .rel files
for module in modules:
    relocate(module[0], module[2])
collinprice/titanium_mobile | node_modules/ioslib/node_modules/node-ios-device/node_modules/node-gyp/gyp/tools/graphviz.py | 2679 | 2878 | #!/usr/bin/env python
# Copyright (c) 2011 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Using the JSON dumped by the dump-dependency-json generator,
generate input suitable for graphviz to render a dependency graph of
targets."""
import collections
import json
import sys
def ParseTarget(target):
  """Split a 'path/file.gyp:target#toolset' spec into its three parts.

  Missing pieces come back as empty strings.
  """
  rest, _, suffix = target.partition('#')
  filename, _, name = rest.partition(':')
  return filename, name, suffix
def LoadEdges(filename, targets):
  """Load the edges map from the dump file, and filter it to only
  show targets in |targets| and their dependents.

  Bug fix: the |filename| argument was previously ignored and 'dump.json'
  was hard-coded; the argument is now honoured (main() still passes
  'dump.json', so existing behavior is unchanged).
  """
  with open(filename) as dump_file:
    edges = json.load(dump_file)
  # Copy out only the edges we're interested in from the full edge list.
  target_edges = {}
  to_visit = targets[:]
  while to_visit:
    src = to_visit.pop()
    if src in target_edges:
      continue
    target_edges[src] = edges[src]
    to_visit.extend(edges[src])
  return target_edges
def WriteGraph(edges):
  """Print a graphviz graph to stdout.
  |edges| is a map of target to a list of other targets it depends on."""
  # Bucket targets by file.
  files = collections.defaultdict(list)
  for src, dst in edges.items():
    build_file, target_name, toolset = ParseTarget(src)
    files[build_file].append(src)
  print 'digraph D {'
  print '  fontsize=8'  # Used by subgraphs.
  print '  node [fontsize=8]'
  # Output nodes by file. We must first write out each node within
  # its file grouping before writing out any edges that may refer
  # to those nodes.
  for filename, targets in files.items():
    if len(targets) == 1:
      # If there's only one node for this file, simplify
      # the display by making it a box without an internal node.
      target = targets[0]
      build_file, target_name, toolset = ParseTarget(target)
      print '  "%s" [shape=box, label="%s\\n%s"]' % (target, filename,
                                                     target_name)
    else:
      # Group multiple nodes together in a subgraph.
      print '  subgraph "cluster_%s" {' % filename
      print '    label = "%s"' % filename
      for target in targets:
        build_file, target_name, toolset = ParseTarget(target)
        print '    "%s" [label="%s"]' % (target, target_name)
      print '  }'
  # Now that we've placed all the nodes within subgraphs, output all
  # the edges between nodes.
  for src, dsts in edges.items():
    for dst in dsts:
      print '  "%s" -> "%s"' % (src, dst)
  print '}'
def main():
  """Read dump.json and emit a graphviz graph for the argv targets.

  Returns a process exit code (0 on success, 1 on usage error).
  """
  if len(sys.argv) < 2:
    print >>sys.stderr, __doc__
    print >>sys.stderr
    print >>sys.stderr, 'usage: %s target1 target2...' % (sys.argv[0])
    return 1
  edges = LoadEdges('dump.json', sys.argv[1:])
  WriteGraph(edges)
  return 0


if __name__ == '__main__':
  sys.exit(main())
| apache-2.0 |
GbalsaC/bitnamiP | venv/lib/python2.7/site-packages/scipy/optimize/nnls.py | 116 | 1423 | from __future__ import division, print_function, absolute_import
from . import _nnls
from numpy import asarray_chkfinite, zeros, double
__all__ = ['nnls']
def nnls(A, b):
    """
    Solve ``argmin_x || Ax - b ||_2`` for ``x>=0``. This is a wrapper
    for a FORTRAN non-negative least squares solver.

    Parameters
    ----------
    A : ndarray
        Matrix ``A`` as shown above.
    b : ndarray
        Right-hand side vector.

    Returns
    -------
    x : ndarray
        Solution vector.
    rnorm : float
        The residual, ``|| Ax-b ||_2``.

    Notes
    -----
    The FORTRAN code was published in the book below. The algorithm
    is an active set method. It solves the KKT (Karush-Kuhn-Tucker)
    conditions for the non-negative least squares problem.

    References
    ----------
    Lawson C., Hanson R.J., (1987) Solving Least Squares Problems, SIAM

    """
    # asarray_chkfinite also rejects NaN/Inf entries up front.
    A, b = map(asarray_chkfinite, (A, b))
    if len(A.shape) != 2:
        raise ValueError("expected matrix")
    if len(b.shape) != 1:
        raise ValueError("expected vector")
    m, n = A.shape
    if m != b.shape[0]:
        raise ValueError("incompatible dimensions")
    # Workspace arrays required by the Fortran routine: w (dual vector),
    # zz (working space), index (index set bookkeeping).
    w = zeros((n,), dtype=double)
    zz = zeros((m,), dtype=double)
    index = zeros((n,), dtype=int)
    x, rnorm, mode = _nnls.nnls(A, m, n, b, w, zz, index)
    # mode != 1 signals that the solver hit its iteration limit.
    if mode != 1:
        raise RuntimeError("too many iterations")
    return x, rnorm
| agpl-3.0 |
pythonitalia/assopy | assopy/auth_backends.py | 5 | 5285 | # -*- coding: UTF-8 -*-
from django.contrib.auth.backends import ModelBackend
from django.contrib.auth.models import User
from django.db import transaction
from assopy import models
from assopy import settings
if settings.GENRO_BACKEND:
from assopy.clients import genro
import logging
log = logging.getLogger('assopy.auth')
class _AssopyBackend(ModelBackend):
    def linkUser(self, user):
        """
        Link the given assopy user with the remote (genropy) backend;
        create the remote user if needed.
        """
        if not settings.GENRO_BACKEND:
            return user
        if user.assopy_id:
            return user
        name = unicode(user.user).encode('utf-8')
        if not user.user.is_active:
            log.info('cannot link a remote user to "%s": it\'s not active', name)
            return
        log.info('a remote user is needed for "%s"', name)
        # The email lookup can return more than one id; that is not a
        # problem, because we only associate a user with the backend once
        # the email has been verified (and verification is not needed when
        # logging in via janrain), so any of the remote identities will do.
        # Some day we will implement identity merging.
        rid = genro.users(email=user.user.email)['r0']
        if rid is not None:
            log.info('an existing match with the email "%s" is found: %s', user.user.email, rid)
            user.assopy_id = rid
            user.save()
            genro.user_remote2local(user)
        else:
            rid = genro.create_user(user.user.first_name, user.user.last_name, user.user.email)
            log.info('new remote user id: %s', rid)
            user.assopy_id = rid
            user.save()
        return user

    def get_user(self, user_id):
        # get_user is redefined to make sure the django user and the assopy
        # user are fetched with a single query. Since the typical template
        # usage is:
        #     {{ request.user.assopy_user.name }}
        # this select_related reduces the number of queries from 3 to 1:
        #     request.user   -> 1 query
        #     .assopy_user   -> 1 query
        #     .name          -> 1 query (fetching the django user again)
        try:
            return User.objects\
                .select_related('assopy_user')\
                .get(pk=user_id)
        except User.DoesNotExist:
            return None
class IdBackend(_AssopyBackend):
    """
    Backend used only internally to authenticate users given their id
    (no password required).
    """
    def authenticate(self, uid=None):
        try:
            user = User.objects.select_related('assopy_user').get(pk=uid, is_active=True)
            auser = user.assopy_user
            if auser is None:
                # This user exists in django but has no linked assopy
                # user; probably an admin created before the backend
                # integration.
                auser = models.User(user=user)
                auser.save()
                models.user_created.send(sender=auser, profile_complete=True)
            self.linkUser(auser)
            return user
        except User.DoesNotExist:
            return None
class EmailBackend(_AssopyBackend):
    """Authenticate a user from an (email, password) pair."""
    def authenticate(self, email=None, password=None):
        try:
            user = User.objects.select_related('assopy_user').get(email__iexact=email, is_active=True)
            if user.check_password(password):
                auser = user.assopy_user
                if auser is None:
                    # This user exists in django but has no linked assopy
                    # user; probably an admin created before the backend
                    # integration.
                    auser = models.User(user=user)
                    auser.save()
                    models.user_created.send(sender=auser, profile_complete=True)
                self.linkUser(auser)
                return user
            # Wrong password: fall through and return None implicitly.
        except User.MultipleObjectsReturned:
            return None
        except User.DoesNotExist:
            # There is no user with this email in the django db, but there
            # might be a legacy user in the ASSOPY backend.
            if not settings.GENRO_BACKEND or not settings.SEARCH_MISSING_USERS_ON_BACKEND:
                return None
            rid = genro.users(email=email, password=password)['r0']
            if rid is not None:
                log.info('"%s" is a valid remote user; a local user is needed', email)
                auser = models.User.objects.create_user(email, password=password, active=True, assopy_id=rid, send_mail=False)
                return auser.user
            else:
                return None
class JanRainBackend(_AssopyBackend):
    """Authenticate a user from a JanRain (social login) identifier."""
    def authenticate(self, identifier=None):
        try:
            i = models.UserIdentity.objects.select_related('user__user').get(identifier=identifier, user__user__is_active=True)
        except models.UserIdentity.DoesNotExist:
            return None
        else:
            # Identity found: make sure it is linked to the remote backend.
            self.linkUser(i.user)
            return i.user.user
| bsd-2-clause |
joram/sickbeard-orange | lib/unidecode/x021.py | 82 | 3963 | data = (
'', # 0x00
'', # 0x01
'', # 0x02
'', # 0x03
'', # 0x04
'', # 0x05
'', # 0x06
'', # 0x07
'', # 0x08
'', # 0x09
'', # 0x0a
'', # 0x0b
'', # 0x0c
'', # 0x0d
'', # 0x0e
'', # 0x0f
'', # 0x10
'', # 0x11
'', # 0x12
'', # 0x13
'', # 0x14
'', # 0x15
'', # 0x16
'', # 0x17
'', # 0x18
'', # 0x19
'', # 0x1a
'', # 0x1b
'', # 0x1c
'', # 0x1d
'', # 0x1e
'', # 0x1f
'', # 0x20
'', # 0x21
'', # 0x22
'', # 0x23
'', # 0x24
'', # 0x25
'', # 0x26
'', # 0x27
'', # 0x28
'', # 0x29
'K', # 0x2a
'A', # 0x2b
'', # 0x2c
'', # 0x2d
'', # 0x2e
'', # 0x2f
'', # 0x30
'', # 0x31
'F', # 0x32
'', # 0x33
'', # 0x34
'', # 0x35
'', # 0x36
'', # 0x37
'', # 0x38
'', # 0x39
'', # 0x3a
'[?]', # 0x3b
'[?]', # 0x3c
'[?]', # 0x3d
'[?]', # 0x3e
'[?]', # 0x3f
'[?]', # 0x40
'[?]', # 0x41
'[?]', # 0x42
'[?]', # 0x43
'[?]', # 0x44
'[?]', # 0x45
'[?]', # 0x46
'[?]', # 0x47
'[?]', # 0x48
'[?]', # 0x49
'[?]', # 0x4a
'[?]', # 0x4b
'[?]', # 0x4c
'[?]', # 0x4d
'F', # 0x4e
'[?]', # 0x4f
'[?]', # 0x50
'[?]', # 0x51
'[?]', # 0x52
' 1/3 ', # 0x53
' 2/3 ', # 0x54
' 1/5 ', # 0x55
' 2/5 ', # 0x56
' 3/5 ', # 0x57
' 4/5 ', # 0x58
' 1/6 ', # 0x59
' 5/6 ', # 0x5a
' 1/8 ', # 0x5b
' 3/8 ', # 0x5c
' 5/8 ', # 0x5d
' 7/8 ', # 0x5e
' 1/', # 0x5f
'I', # 0x60
'II', # 0x61
'III', # 0x62
'IV', # 0x63
'V', # 0x64
'VI', # 0x65
'VII', # 0x66
'VIII', # 0x67
'IX', # 0x68
'X', # 0x69
'XI', # 0x6a
'XII', # 0x6b
'L', # 0x6c
'C', # 0x6d
'D', # 0x6e
'M', # 0x6f
'i', # 0x70
'ii', # 0x71
'iii', # 0x72
'iv', # 0x73
'v', # 0x74
'vi', # 0x75
'vii', # 0x76
'viii', # 0x77
'ix', # 0x78
'x', # 0x79
'xi', # 0x7a
'xii', # 0x7b
'l', # 0x7c
'c', # 0x7d
'd', # 0x7e
'm', # 0x7f
'(D', # 0x80
'D)', # 0x81
'((|))', # 0x82
')', # 0x83
'[?]', # 0x84
'[?]', # 0x85
'[?]', # 0x86
'[?]', # 0x87
'[?]', # 0x88
'[?]', # 0x89
'[?]', # 0x8a
'[?]', # 0x8b
'[?]', # 0x8c
'[?]', # 0x8d
'[?]', # 0x8e
'[?]', # 0x8f
'-', # 0x90
'|', # 0x91
'-', # 0x92
'|', # 0x93
'-', # 0x94
'|', # 0x95
'\\', # 0x96
'/', # 0x97
'\\', # 0x98
'/', # 0x99
'-', # 0x9a
'-', # 0x9b
'~', # 0x9c
'~', # 0x9d
'-', # 0x9e
'|', # 0x9f
'-', # 0xa0
'|', # 0xa1
'-', # 0xa2
'-', # 0xa3
'-', # 0xa4
'|', # 0xa5
'-', # 0xa6
'|', # 0xa7
'|', # 0xa8
'-', # 0xa9
'-', # 0xaa
'-', # 0xab
'-', # 0xac
'-', # 0xad
'-', # 0xae
'|', # 0xaf
'|', # 0xb0
'|', # 0xb1
'|', # 0xb2
'|', # 0xb3
'|', # 0xb4
'|', # 0xb5
'^', # 0xb6
'V', # 0xb7
'\\', # 0xb8
'=', # 0xb9
'V', # 0xba
'^', # 0xbb
'-', # 0xbc
'-', # 0xbd
'|', # 0xbe
'|', # 0xbf
'-', # 0xc0
'-', # 0xc1
'|', # 0xc2
'|', # 0xc3
'=', # 0xc4
'|', # 0xc5
'=', # 0xc6
'=', # 0xc7
'|', # 0xc8
'=', # 0xc9
'|', # 0xca
'=', # 0xcb
'=', # 0xcc
'=', # 0xcd
'=', # 0xce
'=', # 0xcf
'=', # 0xd0
'|', # 0xd1
'=', # 0xd2
'|', # 0xd3
'=', # 0xd4
'|', # 0xd5
'\\', # 0xd6
'/', # 0xd7
'\\', # 0xd8
'/', # 0xd9
'=', # 0xda
'=', # 0xdb
'~', # 0xdc
'~', # 0xdd
'|', # 0xde
'|', # 0xdf
'-', # 0xe0
'|', # 0xe1
'-', # 0xe2
'|', # 0xe3
'-', # 0xe4
'-', # 0xe5
'-', # 0xe6
'|', # 0xe7
'-', # 0xe8
'|', # 0xe9
'|', # 0xea
'|', # 0xeb
'|', # 0xec
'|', # 0xed
'|', # 0xee
'|', # 0xef
'-', # 0xf0
'\\', # 0xf1
'\\', # 0xf2
'|', # 0xf3
'[?]', # 0xf4
'[?]', # 0xf5
'[?]', # 0xf6
'[?]', # 0xf7
'[?]', # 0xf8
'[?]', # 0xf9
'[?]', # 0xfa
'[?]', # 0xfb
'[?]', # 0xfc
'[?]', # 0xfd
'[?]', # 0xfe
)
| gpl-3.0 |
StrellaGroup/frappe | frappe/integrations/oauth2_logins.py | 10 | 1068 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import frappe
import frappe.utils
from frappe.utils.oauth import login_via_oauth2, login_via_oauth2_id_token
import json
# OAuth2 callback endpoints, one per provider. Each is a guest-accessible
# whitelisted method that delegates to frappe's generic helpers with the
# provider name; decoder=json.loads is passed for providers whose token
# endpoint returns JSON rather than form-encoded bodies. Office 365 uses
# the OpenID Connect id_token flow instead of the plain OAuth2 flow.
@frappe.whitelist(allow_guest=True)
def login_via_google(code, state):
	login_via_oauth2("google", code, state, decoder=json.loads)

@frappe.whitelist(allow_guest=True)
def login_via_github(code, state):
	login_via_oauth2("github", code, state)

@frappe.whitelist(allow_guest=True)
def login_via_facebook(code, state):
	login_via_oauth2("facebook", code, state, decoder=json.loads)

@frappe.whitelist(allow_guest=True)
def login_via_frappe(code, state):
	login_via_oauth2("frappe", code, state, decoder=json.loads)

@frappe.whitelist(allow_guest=True)
def login_via_office365(code, state):
	login_via_oauth2_id_token("office_365", code, state, decoder=json.loads)

@frappe.whitelist(allow_guest=True)
def login_via_salesforce(code, state):
	login_via_oauth2("salesforce", code, state, decoder=json.loads)
| mit |
Versent/ansible | lib/ansible/plugins/lookup/dnstxt.py | 69 | 2188 | # (c) 2012, Jan-Piet Mens <jpmens(at)gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
HAVE_DNS=False
try:
import dns.resolver
from dns.exception import DNSException
HAVE_DNS=True
except ImportError:
pass
from ansible.errors import *
from ansible.plugins.lookup import LookupBase
# ==============================================================
# DNSTXT: DNS TXT records
#
# key=domainname
# TODO: configurable resolver IPs
# --------------------------------------------------------------
class LookupModule(LookupBase):
    def run(self, terms, variables=None, **kwargs):
        """Resolve DNS TXT records for each term.

        Returns one string per term: the concatenated TXT rdata, or the
        sentinel 'NXDOMAIN' / '' on NXDOMAIN / timeout respectively.
        """
        if HAVE_DNS == False:
            raise AnsibleError("Can't LOOKUP(dnstxt): module dns.resolver is not installed")
        if isinstance(terms, basestring):
            terms = [ terms ]
        ret = []
        for term in terms:
            # Only the first whitespace-separated token is the domain name.
            domain = term.split()[0]
            string = []
            try:
                answers = dns.resolver.query(domain, 'TXT')
                for rdata in answers:
                    s = rdata.to_text()
                    string.append(s[1:-1]) # Strip outside quotes on TXT rdata
            except dns.resolver.NXDOMAIN:
                string = 'NXDOMAIN'
            except dns.resolver.Timeout:
                string = ''
            except dns.exception.DNSException as e:
                raise AnsibleError("dns.resolver unhandled exception", e)
            # ''.join also works when string is one of the sentinel strings.
            ret.append(''.join(string))
        return ret
| gpl-3.0 |
KamLii/Databaes | Databaes/urls.py | 1 | 1369 | """Databaes URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.10/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf import settings
from django.conf.urls import include, url
from django.conf.urls.static import static
from django.contrib import admin
#from account.views import UserRegistrationFormView, LoginView, logout_view
from . import views
from user_profile.views import SignupView
urlpatterns = [
    url(r'^$', views.homepage, name='homepage'),
    # NOTE(review): duplicate pattern — Django matches the first r'^$' above
    # for incoming requests; this entry only makes reverse('home') resolve.
    url(r'^$', views.homepage, name='home'),
    url(r'^admin/', admin.site.urls),
    url(r'^crate/', include('Crate.urls')),
    url(r"^account/signup/$", SignupView.as_view(), name="account_signup"),
    url(r"^account/", include("account.urls")),
    url(r'^payments/', include('pinax.stripe.urls')),
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| mit |
kennedyshead/home-assistant | homeassistant/components/ipp/__init__.py | 2 | 4560 | """The Internet Printing Protocol (IPP) integration."""
from __future__ import annotations
from datetime import timedelta
import logging
from pyipp import IPP, IPPError, Printer as IPPPrinter
from homeassistant.components.sensor import DOMAIN as SENSOR_DOMAIN
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import (
ATTR_NAME,
CONF_HOST,
CONF_PORT,
CONF_SSL,
CONF_VERIFY_SSL,
)
from homeassistant.core import HomeAssistant
from homeassistant.helpers.aiohttp_client import async_get_clientsession
from homeassistant.helpers.entity import DeviceInfo
from homeassistant.helpers.update_coordinator import (
CoordinatorEntity,
DataUpdateCoordinator,
UpdateFailed,
)
from .const import (
ATTR_IDENTIFIERS,
ATTR_MANUFACTURER,
ATTR_MODEL,
ATTR_SOFTWARE_VERSION,
CONF_BASE_PATH,
DOMAIN,
)
PLATFORMS = [SENSOR_DOMAIN]
SCAN_INTERVAL = timedelta(seconds=60)
_LOGGER = logging.getLogger(__name__)
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
    """Set up IPP from a config entry."""
    hass.data.setdefault(DOMAIN, {})
    # Reuse an existing coordinator for this entry if one survives a reload.
    coordinator = hass.data[DOMAIN].get(entry.entry_id)
    if not coordinator:
        # Create IPP instance for this entry
        coordinator = IPPDataUpdateCoordinator(
            hass,
            host=entry.data[CONF_HOST],
            port=entry.data[CONF_PORT],
            base_path=entry.data[CONF_BASE_PATH],
            tls=entry.data[CONF_SSL],
            verify_ssl=entry.data[CONF_VERIFY_SSL],
        )
        hass.data[DOMAIN][entry.entry_id] = coordinator

    # First poll before platform setup so entities start with data.
    await coordinator.async_config_entry_first_refresh()

    hass.config_entries.async_setup_platforms(entry, PLATFORMS)

    return True
async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
    """Unload a config entry."""
    # Tear down the sensor platforms; drop the coordinator only on success.
    if unload_ok := await hass.config_entries.async_unload_platforms(entry, PLATFORMS):
        hass.data[DOMAIN].pop(entry.entry_id)
    return unload_ok
class IPPDataUpdateCoordinator(DataUpdateCoordinator[IPPPrinter]):
    """Class to manage fetching IPP data from single endpoint."""

    def __init__(
        self,
        hass: HomeAssistant,
        *,
        host: str,
        port: int,
        base_path: str,
        tls: bool,
        verify_ssl: bool,
    ) -> None:
        """Initialize global IPP data updater."""
        # The aiohttp session is shared/managed by Home Assistant.
        self.ipp = IPP(
            host=host,
            port=port,
            base_path=base_path,
            tls=tls,
            verify_ssl=verify_ssl,
            session=async_get_clientsession(hass, verify_ssl),
        )

        super().__init__(
            hass,
            _LOGGER,
            name=DOMAIN,
            update_interval=SCAN_INTERVAL,
        )

    async def _async_update_data(self) -> IPPPrinter:
        """Fetch data from IPP.

        Raises UpdateFailed so the coordinator can mark entities
        unavailable when the printer cannot be queried.
        """
        try:
            return await self.ipp.printer()
        except IPPError as error:
            raise UpdateFailed(f"Invalid response from API: {error}") from error
class IPPEntity(CoordinatorEntity):
    """Defines a base IPP entity.

    Concrete sensor entities derive from this; it wires the coordinator in
    and exposes the common name/icon/device-info plumbing.
    """

    def __init__(
        self,
        *,
        entry_id: str,
        device_id: str,
        coordinator: IPPDataUpdateCoordinator,
        name: str,
        icon: str,
        enabled_default: bool = True,
    ) -> None:
        """Initialize the IPP entity.

        Args:
            entry_id: Config entry ID this entity belongs to.
            device_id: Device registry identifier.  NOTE(review): device_info
                checks this for None, so callers apparently may pass None
                despite the plain ``str`` annotation -- confirm.
            coordinator: Shared coordinator providing printer data.
            name: Human readable entity name.
            icon: MDI icon identifier.
            enabled_default: Whether the entity starts enabled in the registry.
        """
        super().__init__(coordinator)
        self._device_id = device_id
        self._enabled_default = enabled_default
        self._entry_id = entry_id
        self._icon = icon
        self._name = name

    @property
    def name(self) -> str:
        """Return the name of the entity."""
        return self._name

    @property
    def icon(self) -> str:
        """Return the mdi icon of the entity."""
        return self._icon

    @property
    def entity_registry_enabled_default(self) -> bool:
        """Return if the entity should be enabled when first added to the entity registry."""
        return self._enabled_default

    @property
    def device_info(self) -> DeviceInfo:
        """Return device information about this IPP device.

        NOTE(review): returns None when no device id is set even though the
        annotation says DeviceInfo; consider ``DeviceInfo | None``.
        """
        if self._device_id is None:
            return None
        # All fields come from the coordinator's last successful poll.
        return {
            ATTR_IDENTIFIERS: {(DOMAIN, self._device_id)},
            ATTR_NAME: self.coordinator.data.info.name,
            ATTR_MANUFACTURER: self.coordinator.data.info.manufacturer,
            ATTR_MODEL: self.coordinator.data.info.model,
            ATTR_SOFTWARE_VERSION: self.coordinator.data.info.version,
        }
| apache-2.0 |
santhoshtr/mlmorph | tests/mlmorph-test.py | 1 | 2470 | import json
import unittest
import sys
import os
import re
from mlmorph import Generator, Analyser
# Absolute directory containing this test file; used to locate tests.json.
CURR_DIR = os.path.abspath(os.path.dirname(os.path.realpath(__file__)))
class Struct:
    """Lightweight object whose attributes mirror the keys of a mapping.

    Used as a json.load object_hook so fixture records can be accessed
    with attribute syntax (test.word) instead of subscripting.
    """

    def __init__(self, entries):
        """Copy every key/value pair of *entries* onto this instance."""
        for key, value in entries.items():
            setattr(self, key, value)
class AnalyserGeneratorTests(unittest.TestCase):
    """Round-trip tests for mlmorph driven by the tests.json fixture file.

    Each fixture record carries a surface ``word`` and a morphological
    ``analysis``; records flagged ``skip`` are tolerated failures.
    """

    # Shared at class level: constructing the transducers is expensive,
    # so do it once rather than per test method.
    generator = Generator()
    analyser = Analyser()

    def setUp(self):
        # Parse tests.json into Struct objects so fields read as attributes.
        self.testFile = open(os.path.join(CURR_DIR, 'tests.json'))
        self.tests = json.load(self.testFile, object_hook=Struct)

    def tearDown(self):
        # Release the fixture file handle opened in setUp.
        self.testFile.close()

    def test_analyse(self):
        """Every fixture word must yield its expected analysis string."""
        print('\t**** Analyse tests ****\t')
        line = 0
        for test in self.tests:
            line += 1
            with self.subTest(test.word):
                anals = self.analyser.analyse(test.word)
                match = False
                # Skip-flagged records are ignored entirely.
                if not (hasattr(test, 'skip') and test.skip):
                    self.assertTrue(len(anals) != 0,
                                    'Analysis failed for ' + test.word)
                else:
                    continue
                print('%3d %s\t<--\t%s' % (line, test.word, anals))
                # anals is a sequence of (analysis, weight) pairs; accept a
                # match anywhere in the candidate list.
                for index in range(len(anals)):
                    if test.analysis == anals[index][0]:
                        match = True
                        break
                if not (hasattr(test, 'skip') and test.skip):
                    self.assertEqual(
                        match, True, 'Analysis for ' + test.analysis)

    def test_generate(self):
        """Every fixture analysis must generate its expected surface word."""
        print('\t**** Generate tests ****\t')
        line = 0
        for test in self.tests:
            line += 1
            with self.subTest(test.word):
                match = False
                gens = self.generator.generate(test.analysis, True)
                if not (hasattr(test, 'skip') and test.skip):
                    self.assertTrue(
                        len(gens) != 0, 'Generate failed for ' + test.analysis)
                else:
                    continue
                print('%3d %s\t<--\t%s' % (line, test.analysis, gens))
                # gens is a sequence of (word, weight) pairs.
                for index in range(len(gens)):
                    if test.word == gens[index][0]:
                        match = True
                        break
                if not (hasattr(test, 'skip') and test.skip):
                    self.assertEqual(
                        match, True, 'Generate for ' + test.analysis)
# Allow running the module directly: python mlmorph-test.py
if __name__ == '__main__':
    unittest.main()
| gpl-3.0 |
vbillardm/vbillardm-webp2018-SI201704-la-truelle | wordpress/node_modules/node-gyp/gyp/pylib/gyp/MSVSNew.py | 1835 | 12124 | # Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""New implementation of Visual Studio project generation."""
import os
import random
import gyp.common
# hashlib is supplied as of Python 2.5 as the replacement interface for md5
# and other secure hashes. In 2.6, md5 is deprecated. Import hashlib if
# available, avoiding a deprecation warning under 2.6. Import md5 otherwise,
# preserving 2.4 compatibility.
try:
import hashlib
_new_md5 = hashlib.md5
except ImportError:
import md5
_new_md5 = md5.new
# Initialize random number generator
random.seed()

# GUIDs for project types.  These are the well-known Visual Studio type
# GUIDs for C++ projects and solution folders respectively.
ENTRY_TYPE_GUIDS = {
    'project': '{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}',
    'folder': '{2150E333-8FDC-42A3-9474-1A3956D46DE8}',
}
#------------------------------------------------------------------------------
# Helper functions
def MakeGuid(name, seed='msvs_new'):
  """Returns a GUID for the specified target name.

  Args:
    name: Target name.
    seed: Seed for MD5 hash.
  Returns:
    A GUID-like string deterministically derived from name and seed.

  The value depends only on name and seed, so repeated generator runs
  produce stable GUIDs, cross-referencing projects/solutions can compute
  each other's GUIDs, and rebuilding a target never changes its GUID.
  """
  # The upper-cased MD5 digest of seed+name supplies the hex digits.
  digest = _new_md5(str(seed) + str(name)).hexdigest().upper()
  # Lay the first 32 digits out in standard 8-4-4-4-12 GUID form; the
  # remainder of the digest is discarded.
  parts = (digest[:8], digest[8:12], digest[12:16], digest[16:20], digest[20:32])
  return '{%s-%s-%s-%s-%s}' % parts
#------------------------------------------------------------------------------
class MSVSSolutionEntry(object):
  """Common base for solution entries (folders and projects)."""

  def __cmp__(self, other):
    # Sort by name then guid (so things are in order on vs2008).
    # NOTE: __cmp__ and the cmp() builtin are Python 2 only; this module
    # predates Python 3 support.
    return cmp((self.name, self.get_guid()), (other.name, other.get_guid()))
class MSVSFolder(MSVSSolutionEntry):
  """Folder in a Visual Studio project or solution."""

  def __init__(self, path, name = None, entries = None,
               guid = None, items = None):
    """Initializes the folder.

    Args:
      path: Full path to the folder.
      name: Name of the folder; defaults to the last path component.
      entries: Folder or Project objects nested inside this folder, or None.
      guid: GUID to use for folder, or None to derive one lazily.
      items: Solution items held directly by the folder, or None.
    """
    # Default the display name to the final path component.
    self.name = name if name else os.path.basename(path)
    self.path = path
    self.guid = guid

    # Own fresh lists so the caller's sequences are never aliased.
    self.entries = sorted(entries or [])
    self.items = list(items or [])

    self.entry_type_guid = ENTRY_TYPE_GUIDS['folder']

  def get_guid(self):
    """Returns the folder GUID, deriving a stable one from the path."""
    if self.guid is None:
      # Path-seeded guids keep regenerated solutions diff-friendly.
      self.guid = MakeGuid(self.path, seed='msvs_folder')
    return self.guid
#------------------------------------------------------------------------------
class MSVSProject(MSVSSolutionEntry):
  """Visual Studio project."""

  def __init__(self, path, name = None, dependencies = None, guid = None,
               spec = None, build_file = None, config_platform_overrides = None,
               fixpath_prefix = None):
    """Initializes the project.

    Args:
      path: Absolute path to the project file.
      name: Name of project; defaults to the project file's base name.
      dependencies: Project objects this project depends upon, or None.
      guid: GUID to use for project, or None to derive one from the name.
      spec: Dictionary specifying how to build this project.
      build_file: Filename of the .gyp file that the vcproj file comes from.
      config_platform_overrides: optional dict of configuration platforms to
          use in place of the default for this target.
      fixpath_prefix: the path used to adjust the behavior of _fixpath.
    """
    self.path = path
    self.guid = guid
    self.spec = spec
    self.build_file = build_file
    # Fall back to the project file's base name when no name is given.
    self.name = name or os.path.splitext(os.path.basename(path))[0]

    # Own a copy of the dependency list; never alias the caller's.
    self.dependencies = list(dependencies or [])

    self.entry_type_guid = ENTRY_TYPE_GUIDS['project']
    self.config_platform_overrides = config_platform_overrides or {}
    self.fixpath_prefix = fixpath_prefix
    self.msbuild_toolset = None

  def set_dependencies(self, dependencies):
    """Replaces the dependency list with a copy of |dependencies|."""
    self.dependencies = list(dependencies or [])

  def get_guid(self):
    """Returns the project GUID, deriving a stable one from the name."""
    if self.guid is None:
      # Derive the GUID from the project name.
      # TODO(rspangler): This is fragile: base names may collide across
      # directories (foo/unittest.vcproj vs bar/unittest.vcproj), the seed
      # should really be relative to $SOURCE_ROOT so nesting solutions agree,
      # it must stay stable across builder invocations, and pre-built project
      # files should have their GUID read from disk instead.
      self.guid = MakeGuid(self.name)
    return self.guid

  def set_msbuild_toolset(self, msbuild_toolset):
    """Records the msbuild toolset to emit for this project."""
    self.msbuild_toolset = msbuild_toolset
#------------------------------------------------------------------------------
class MSVSSolution(object):
  """Visual Studio solution.

  Emits a .sln file referencing the given folder/project entries.  Note
  that the constructor writes the file to disk immediately.
  """

  def __init__(self, path, version, entries=None, variants=None,
               websiteProperties=True):
    """Initializes the solution.

    Args:
      path: Path to solution file.
      version: Format version to emit.
      entries: List of entries in solution. May contain Folder or Project
        objects. May be None, if the folder is empty.
      variants: List of build variant strings. If none, a default list will
        be used.
      websiteProperties: Flag to decide if the website properties section
        is generated.
    """
    self.path = path
    self.websiteProperties = websiteProperties
    self.version = version

    # Copy passed lists (or set to empty lists)
    self.entries = list(entries or [])

    if variants:
      # Copy passed list
      self.variants = variants[:]
    else:
      # Use default
      self.variants = ['Debug|Win32', 'Release|Win32']
    # TODO(rspangler): Need to be able to handle a mapping of solution config
    # to project config.  Should we be able to handle variants being a dict,
    # or add a separate variant_map variable?  If it's a dict, we can't
    # guarantee the order of variants since dict keys aren't ordered.

    # TODO(rspangler): Automatically write to disk for now; should delay until
    # node-evaluation time.
    self.Write()

  def Write(self, writer=gyp.common.WriteOnDiff):
    """Writes the solution file to disk.

    The writer factory defaults to WriteOnDiff so the .sln is only
    rewritten when its content actually changes.

    Raises:
      IndexError: An entry appears multiple times.
    """
    # Walk the entry tree and collect all the folders and projects.
    all_entries = set()
    entries_to_check = self.entries[:]
    while entries_to_check:
      e = entries_to_check.pop(0)

      # If this entry has been visited, nothing to do.
      if e in all_entries:
        continue

      all_entries.add(e)

      # If this is a folder, check its entries too.
      if isinstance(e, MSVSFolder):
        entries_to_check += e.entries

    # Deterministic ordering (MSVSSolutionEntry sorts by name, then guid).
    all_entries = sorted(all_entries)

    # Open file and print header
    f = writer(self.path)
    f.write('Microsoft Visual Studio Solution File, '
            'Format Version %s\r\n' % self.version.SolutionVersion())
    f.write('# %s\r\n' % self.version.Description())

    # Project entries
    sln_root = os.path.split(self.path)[0]
    for e in all_entries:
      relative_path = gyp.common.RelativePath(e.path, sln_root)
      # msbuild does not accept an empty folder_name.
      # use '.' in case relative_path is empty.
      folder_name = relative_path.replace('/', '\\') or '.'
      f.write('Project("%s") = "%s", "%s", "%s"\r\n' % (
          e.entry_type_guid,          # Entry type GUID
          e.name,                     # Folder name
          folder_name,                # Folder name (again)
          e.get_guid(),               # Entry GUID
      ))

      # TODO(rspangler): Need a way to configure this stuff
      if self.websiteProperties:
        f.write('\tProjectSection(WebsiteProperties) = preProject\r\n'
                '\t\tDebug.AspNetCompiler.Debug = "True"\r\n'
                '\t\tRelease.AspNetCompiler.Debug = "False"\r\n'
                '\tEndProjectSection\r\n')

      if isinstance(e, MSVSFolder):
        if e.items:
          f.write('\tProjectSection(SolutionItems) = preProject\r\n')
          for i in e.items:
            f.write('\t\t%s = %s\r\n' % (i, i))
          f.write('\tEndProjectSection\r\n')

      if isinstance(e, MSVSProject):
        if e.dependencies:
          f.write('\tProjectSection(ProjectDependencies) = postProject\r\n')
          for d in e.dependencies:
            f.write('\t\t%s = %s\r\n' % (d.get_guid(), d.get_guid()))
          f.write('\tEndProjectSection\r\n')

      f.write('EndProject\r\n')

    # Global section
    f.write('Global\r\n')

    # Configurations (variants)
    f.write('\tGlobalSection(SolutionConfigurationPlatforms) = preSolution\r\n')
    for v in self.variants:
      f.write('\t\t%s = %s\r\n' % (v, v))
    f.write('\tEndGlobalSection\r\n')

    # Sort config guids for easier diffing of solution changes.
    config_guids = []
    config_guids_overrides = {}
    for e in all_entries:
      if isinstance(e, MSVSProject):
        config_guids.append(e.get_guid())
        config_guids_overrides[e.get_guid()] = e.config_platform_overrides
    config_guids.sort()

    f.write('\tGlobalSection(ProjectConfigurationPlatforms) = postSolution\r\n')
    for g in config_guids:
      for v in self.variants:
        # Per-target overrides may substitute a different project config
        # for this solution config.
        nv = config_guids_overrides[g].get(v, v)
        # Pick which project configuration to build for this solution
        # configuration.
        f.write('\t\t%s.%s.ActiveCfg = %s\r\n' % (
            g,              # Project GUID
            v,              # Solution build configuration
            nv,             # Project build config for that solution config
        ))

        # Enable project in this solution configuration.
        f.write('\t\t%s.%s.Build.0 = %s\r\n' % (
            g,              # Project GUID
            v,              # Solution build configuration
            nv,             # Project build config for that solution config
        ))
    f.write('\tEndGlobalSection\r\n')

    # TODO(rspangler): Should be able to configure this stuff too (though I've
    # never seen this be any different)
    f.write('\tGlobalSection(SolutionProperties) = preSolution\r\n')
    f.write('\t\tHideSolutionNode = FALSE\r\n')
    f.write('\tEndGlobalSection\r\n')

    # Folder mappings
    # Omit this section if there are no folders
    if any([e.entries for e in all_entries if isinstance(e, MSVSFolder)]):
      f.write('\tGlobalSection(NestedProjects) = preSolution\r\n')
      for e in all_entries:
        if not isinstance(e, MSVSFolder):
          continue        # Does not apply to projects, only folders
        for subentry in e.entries:
          f.write('\t\t%s = %s\r\n' % (subentry.get_guid(), e.get_guid()))
      f.write('\tEndGlobalSection\r\n')

    f.write('EndGlobal\r\n')

    f.close()
| mit |
prasaianooz/pip | tests/functional/test_uninstall_user.py | 36 | 3417 | """
tests specific to uninstalling --user installs
"""
import pytest
from os.path import isdir, isfile
from tests.lib import pyversion, assert_all_changes
from tests.functional.test_install_user import _patch_dist_in_site_packages
class Tests_UninstallUserSite:
    """pytest suite covering `pip uninstall` of --user installations.

    The `script` and `virtualenv` fixtures come from tests.lib; each test
    enables system site-packages so the user site participates in sys.path.
    """

    @pytest.mark.network
    def test_uninstall_from_usersite(self, script, virtualenv):
        """
        Test uninstall from usersite
        """
        virtualenv.system_site_packages = True
        result1 = script.pip('install', '--user', 'INITools==0.3')
        result2 = script.pip('uninstall', '-y', 'INITools')
        # Nothing but the build dir and cache may differ after the round trip.
        assert_all_changes(result1, result2, [script.venv / 'build', 'cache'])

    def test_uninstall_from_usersite_with_dist_in_global_site(
            self, script, virtualenv):
        """
        Test uninstall from usersite (with same dist in global site)
        """
        # the test framework only supports testing using virtualenvs.
        # the sys.path ordering for virtualenvs with --system-site-packages is
        # this: virtualenv-site, user-site, global-site.
        # this test will use 2 modifications to simulate the
        # user-site/global-site relationship
        # 1) a monkey patch which will make it appear piptestpackage is not in
        #    the virtualenv site if we don't patch this, pip will return an
        #    installation error:  "Will not install to the usersite because it
        #    will lack sys.path precedence..."
        # 2) adding usersite to PYTHONPATH, so usersite has sys.path precedence
        #    over the virtualenv site
        virtualenv.system_site_packages = True
        script.environ["PYTHONPATH"] = script.base_path / script.user_site
        _patch_dist_in_site_packages(script)

        # Install 0.1 globally (virtualenv site) and 0.1.1 in the user site.
        script.pip_install_local('pip-test-package==0.1')
        result2 = script.pip_install_local('--user', 'pip-test-package==0.1.1')
        result3 = script.pip('uninstall', '-vy', 'pip-test-package')

        # uninstall console is mentioning user scripts, but not global scripts
        assert script.user_bin_path in result3.stdout
        assert script.bin_path not in result3.stdout

        # uninstall worked
        assert_all_changes(result2, result3, [script.venv / 'build', 'cache'])

        # site still has 0.2 (can't look in result1; have to check)
        # NOTE(review): the comment says 0.2 but the egg-info checked below is
        # for 0.1, the version installed globally above -- likely a stale
        # comment rather than a wrong assertion.
        egg_info_folder = (
            script.base_path / script.site_packages /
            'pip_test_package-0.1-py%s.egg-info' % pyversion
        )
        assert isdir(egg_info_folder)

    def test_uninstall_editable_from_usersite(self, script, virtualenv, data):
        """
        Test uninstall editable local user install
        """
        virtualenv.system_site_packages = True
        script.user_site_path.makedirs()

        # install
        to_install = data.packages.join("FSPkg")
        result1 = script.pip(
            'install', '--user', '-e', to_install, expect_error=False,
        )
        egg_link = script.user_site / 'FSPkg.egg-link'
        assert egg_link in result1.files_created, str(result1.stdout)

        # uninstall
        result2 = script.pip('uninstall', '-y', 'FSPkg')
        assert not isfile(script.base_path / egg_link)

        # The user-site easy-install.pth is expected to change; everything
        # else must be back to the pre-install state.
        assert_all_changes(
            result1,
            result2,
            [
                script.venv / 'build',
                'cache',
                script.user_site / 'easy-install.pth',
            ]
        )
| mit |
ArcherSys/ArcherSys | Lib/site-packages/cms/tests/forms.py | 8 | 6098 | # -*- coding: utf-8 -*-
from __future__ import with_statement
from django.contrib.sites.models import Site
from django.core.cache import cache
from cms.admin import forms
from cms.admin.forms import PageUserForm
from cms.api import create_page, create_page_user
from cms.forms.fields import PageSelectFormField, SuperLazyIterator
from cms.forms.utils import (get_site_choices, get_page_choices,
update_site_and_page_choices)
from cms.test_utils.testcases import CMSTestCase
from cms.utils.compat.dj import get_user_model
class Mock_PageSelectFormField(PageSelectFormField):
    """Minimal stand-in used to unit test compress() in isolation."""

    def __init__(self, required=False):
        # Deliberately skip the parent __init__ so only the attributes that
        # compress() actually reads are configured.
        self.required = required
        self.error_messages = {'invalid_page': 'Invalid_page'}
class FormsTestCase(CMSTestCase):
    """Tests for CMS admin form helpers: the site/page choice utilities and
    PageSelectFormField.compress()."""

    def setUp(self):
        # The choice helpers memoize through the cache; start each test clean.
        cache.clear()

    def _create_superuser(self):
        """Create and return a saved, active superuser.

        Handles custom user models whose USERNAME_FIELD is the email
        address (no separate username attribute to set).
        """
        User = get_user_model()
        fields = dict(is_staff=True, is_active=True, is_superuser=True,
                      email="super@super.com")
        if User.USERNAME_FIELD != 'email':
            fields[User.USERNAME_FIELD] = "super"
        user_super = User(**fields)
        user_super.set_password(getattr(user_super, User.USERNAME_FIELD))
        user_super.save()
        return user_super

    def test_get_site_choices(self):
        """No pages exist, so there are no site choices."""
        result = get_site_choices()
        self.assertEqual(result, [])

    def test_get_page_choices(self):
        """No pages exist, so only the empty placeholder choice remains."""
        result = get_page_choices()
        self.assertEqual(result, [('', '----')])

    def test_get_site_choices_without_moderator(self):
        result = get_site_choices()
        self.assertEqual(result, [])

    def test_get_site_choices_without_moderator_with_superuser(self):
        # boilerplate (creating a page)
        user_super = self._create_superuser()
        with self.login_user_context(user_super):
            create_page("home", "nav_playground.html", "en",
                        created_by=user_super)
            # The proper test
            result = get_site_choices()
            self.assertEqual(result, [(1, 'example.com')])

    def test_compress_function_raises_when_page_is_none(self):
        """A required field must reject a data list with no page."""
        fake_field = Mock_PageSelectFormField(required=True)
        data_list = (0, None)  # (site_id, page_id); site_id is not used
        with self.assertRaises(forms.ValidationError):
            fake_field.compress(data_list)

    def test_compress_function_returns_none_when_not_required(self):
        fake_field = Mock_PageSelectFormField(required=False)
        data_list = (0, None)  # (site_id, page_id); site_id is not used
        result = fake_field.compress(data_list)
        self.assertEqual(result, None)

    def test_compress_function_returns_none_when_no_data_list(self):
        fake_field = Mock_PageSelectFormField(required=False)
        data_list = None
        result = fake_field.compress(data_list)
        self.assertEqual(result, None)

    def test_compress_function_gets_a_page_when_one_exists(self):
        # boilerplate (creating a page)
        user_super = self._create_superuser()
        with self.login_user_context(user_super):
            home_page = create_page("home", "nav_playground.html", "en",
                                    created_by=user_super)
            # The actual test
            fake_field = Mock_PageSelectFormField()
            data_list = (0, home_page.pk)  # (site_id, page_id); site_id unused
            result = fake_field.compress(data_list)
            self.assertEqual(home_page, result)

    def test_update_site_and_page_choices(self):
        Site.objects.all().delete()
        site = Site.objects.create(domain='http://www.django-cms.org',
                                   name='Django CMS', pk=1)
        page1 = create_page('Page 1', 'nav_playground.html', 'en', site=site)
        page2 = create_page('Page 2', 'nav_playground.html', 'de', site=site)
        page3 = create_page('Page 3', 'nav_playground.html', 'en',
                            site=site, parent=page1)
        # enforce the choices to be casted to a list
        site_choices, page_choices = [list(bit) for bit in
                                      update_site_and_page_choices('en')]
        # Children are indented below their parent in the page choices.
        self.assertEqual(page_choices, [
            ('', '----'),
            (site.name, [
                (page1.pk, 'Page 1'),
                (page3.pk, '&nbsp;&nbsp;Page 3'),
                (page2.pk, 'Page 2'),
            ])
        ])
        self.assertEqual(site_choices, [(site.pk, site.name)])

    def test_superlazy_iterator_behaves_properly_for_sites(self):
        normal_result = get_site_choices()
        lazy_result = SuperLazyIterator(get_site_choices)
        self.assertEqual(normal_result, list(lazy_result))

    def test_superlazy_iterator_behaves_properly_for_pages(self):
        normal_result = get_page_choices()
        lazy_result = SuperLazyIterator(get_page_choices)
        self.assertEqual(normal_result, list(lazy_result))

    def test_page_user_form_initial(self):
        """A page user created with grant_all gets every permission initial."""
        if get_user_model().USERNAME_FIELD == 'email':
            myuser = get_user_model().objects.create_superuser(
                "myuser", "myuser@django-cms.org", "myuser@django-cms.org")
        else:
            myuser = get_user_model().objects.create_superuser(
                "myuser", "myuser@django-cms.org", "myuser")
        user = create_page_user(myuser, myuser, grant_all=True)
        puf = PageUserForm(instance=user)
        names = ['can_add_page', 'can_change_page', 'can_delete_page',
                 'can_add_pageuser', 'can_change_pageuser',
                 'can_delete_pageuser', 'can_add_pagepermission',
                 'can_change_pagepermission', 'can_delete_pagepermission']
        for name in names:
            self.assertTrue(puf.initial.get(name, False))
| mit |
Chilledheart/vbox | src/VBox/ValidationKit/testmanager/webui/wuiadmintestbox.py | 1 | 18147 | # -*- coding: utf-8 -*-
# $Id$
"""
Test Manager WUI - TestBox.
"""
__copyright__ = \
"""
Copyright (C) 2012-2014 Oracle Corporation
This file is part of VirtualBox Open Source Edition (OSE), as
available from http://www.virtualbox.org. This file is free software;
you can redistribute it and/or modify it under the terms of the GNU
General Public License (GPL) as published by the Free Software
Foundation, in version 2 as it comes in the "COPYING" file of the
VirtualBox OSE distribution. VirtualBox OSE is distributed in the
hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
The contents of this file may alternatively be used under the terms
of the Common Development and Distribution License Version 1.0
(CDDL) only, as it comes in the "COPYING.CDDL" file of the
VirtualBox OSE distribution, in which case the provisions of the
CDDL are applicable instead of those of the GPL.
You may elect to license modified versions of this file under the
terms and conditions of either the GPL or the CDDL or both.
"""
__version__ = "$Revision$"
# Standard python imports.
import socket;
# Validation Kit imports.
from testmanager.webui.wuicontentbase import WuiListContentWithActionBase, WuiFormContentBase, WuiLinkBase, WuiSvnLink, \
WuiTmLink, WuiSpanText, WuiRawHtml;
from testmanager.core.db import TMDatabaseConnection;
from testmanager.core.schedgroup import SchedGroupLogic, SchedGroupData;
from testmanager.core.testbox import TestBoxData;
from testmanager.core.testset import TestSetData;
from common import utils;
from testmanager.core.db import isDbTimestampInfinity;
class WuiTestBox(WuiFormContentBase):
    """
    WUI TestBox Form Content Generator.

    Renders the create/edit/show form for a single TestBox record.
    """

    def __init__(self, oData, sMode, oDisp):
        """Chooses the form title from the mode, then initializes the base."""
        if sMode == WuiFormContentBase.ksMode_Add:
            sTitle = 'Create TestBox';  # Fixed typo: was 'Create TextBox'.
            if oData.uuidSystem is not None and len(oData.uuidSystem) > 10:
                sTitle += ' - ' + oData.uuidSystem;
        elif sMode == WuiFormContentBase.ksMode_Edit:
            sTitle = 'Edit TestBox - %s (#%s)' % (oData.sName, oData.idTestBox);
        else:
            assert sMode == WuiFormContentBase.ksMode_Show;
            sTitle = 'TestBox - %s (#%s)' % (oData.sName, oData.idTestBox);
        WuiFormContentBase.__init__(self, oData, sMode, 'TestBox', oDisp, sTitle);

        # Try enter sName as hostname (no domain) when creating the testbox.
        if sMode == WuiFormContentBase.ksMode_Add \
           and self._oData.sName in [None, ''] \
           and self._oData.ip not in [None, '']:
            try:
                (self._oData.sName, _, _) = socket.gethostbyaddr(self._oData.ip);
            except Exception:  # Best effort only; narrowed from a bare except.
                pass;
            # Guard: sName may still be None/'' if the reverse lookup failed,
            # in which case there is nothing to trim (previously .find() could
            # be called on None and raise AttributeError).
            if self._oData.sName:
                offDot = self._oData.sName.find('.');
                if offDot > 0:
                    self._oData.sName = self._oData.sName[:offDot];

    def _populateForm(self, oForm, oData):
        """Adds all TestBox fields to oForm.

        Identity, scheduling and LOM fields are editable; the hardware and
        software facts reported by the testbox itself are read-only.
        """
        oForm.addIntRO(      TestBoxData.ksParam_idTestBox,        oData.idTestBox,       'TestBox ID');
        oForm.addIntRO(      TestBoxData.ksParam_idGenTestBox,     oData.idGenTestBox,    'TestBox generation ID');
        oForm.addTimestampRO(TestBoxData.ksParam_tsEffective,      oData.tsEffective,     'Last changed');
        oForm.addTimestampRO(TestBoxData.ksParam_tsExpire,         oData.tsExpire,        'Expires (excl)');
        oForm.addIntRO(      TestBoxData.ksParam_uidAuthor,        oData.uidAuthor,       'Changed by UID');
        oForm.addText(       TestBoxData.ksParam_ip,               oData.ip,              'TestBox IP Address');
        oForm.addUuid(       TestBoxData.ksParam_uuidSystem,       oData.uuidSystem,      'TestBox System/Firmware UUID');
        oForm.addText(       TestBoxData.ksParam_sName,            oData.sName,           'TestBox Name');
        oForm.addText(       TestBoxData.ksParam_sDescription,     oData.sDescription,    'TestBox Description');
        oForm.addComboBox(   TestBoxData.ksParam_idSchedGroup,     oData.idSchedGroup,    'Scheduling Group',
                             SchedGroupLogic(TMDatabaseConnection()).getSchedGroupsForCombo());
        oForm.addCheckBox(   TestBoxData.ksParam_fEnabled,         oData.fEnabled,        'Enabled');
        oForm.addComboBox(   TestBoxData.ksParam_enmLomKind,       oData.enmLomKind,      'Lights-out-management',
                             TestBoxData.kaoLomKindDescs);
        oForm.addText(       TestBoxData.ksParam_ipLom,            oData.ipLom,           'Lights-out-management IP Address');
        oForm.addInt(        TestBoxData.ksParam_pctScaleTimeout,  oData.pctScaleTimeout, 'Timeout scale factor (%)');

        ## @todo Pretty format the read-only fields and use hidden fields for
        #        passing the actual values. (Yes, we need the values so we can
        #        display the form correctly on input error.)
        oForm.addTextRO(     TestBoxData.ksParam_sOs,              oData.sOs,             'TestBox OS');
        oForm.addTextRO(     TestBoxData.ksParam_sOsVersion,       oData.sOsVersion,      'TestBox OS version');
        oForm.addTextRO(     TestBoxData.ksParam_sCpuArch,         oData.sCpuArch,        'TestBox OS kernel architecture');
        oForm.addTextRO(     TestBoxData.ksParam_sCpuVendor,       oData.sCpuVendor,      'TestBox CPU vendor');
        oForm.addTextRO(     TestBoxData.ksParam_sCpuName,         oData.sCpuName,        'TestBox CPU name');
        if oData.lCpuRevision:
            # Show the raw revision plus the decoded family/model/stepping.
            oForm.addTextRO( TestBoxData.ksParam_lCpuRevision,     '%#x' % (oData.lCpuRevision,), 'TestBox CPU revision',
                             sPostHtml = ' (family=%#x model=%#x stepping=%#x)'
                                       % (oData.getCpuFamily(), oData.getCpuModel(), oData.getCpuStepping(),),
                             sSubClass = 'long');
        else:
            oForm.addLongRO( TestBoxData.ksParam_lCpuRevision,     oData.lCpuRevision,    'TestBox CPU revision');
        oForm.addIntRO(      TestBoxData.ksParam_cCpus,            oData.cCpus,           'Number of CPUs, cores and threads');
        oForm.addCheckBoxRO( TestBoxData.ksParam_fCpuHwVirt,       oData.fCpuHwVirt,      'VT-x or AMD-V supported');
        oForm.addCheckBoxRO( TestBoxData.ksParam_fCpuNestedPaging, oData.fCpuNestedPaging, 'Nested paging supported');
        oForm.addCheckBoxRO( TestBoxData.ksParam_fCpu64BitGuest,   oData.fCpu64BitGuest,  '64-bit guest supported');
        oForm.addCheckBoxRO( TestBoxData.ksParam_fChipsetIoMmu,    oData.fChipsetIoMmu,   'I/O MMU supported');
        oForm.addMultilineTextRO(TestBoxData.ksParam_sReport,      oData.sReport,         'Hardware/software report');
        oForm.addLongRO(     TestBoxData.ksParam_cMbMemory,        oData.cMbMemory,       'Installed RAM size (MB)');
        oForm.addLongRO(     TestBoxData.ksParam_cMbScratch,       oData.cMbScratch,      'Available scratch space (MB)');
        oForm.addIntRO(      TestBoxData.ksParam_iTestBoxScriptRev, oData.iTestBoxScriptRev,
                             'TestBox Script SVN revision');
        # Later:
        #if not self.isAttributeNull(''):
        #    sHexVer = '%s.%s.%.%s' % (oData.iPythonHexVersion >> 24, (oData.iPythonHexVersion >> 16) & 0xff,
        #                              (oData.iPythonHexVersion >> 8) & 0xff, oData.iPythonHexVersion & 0xff);
        #else:
        #    sHexVer = str(oData.iPythonHexVersion);
        oForm.addIntRO(      TestBoxData.ksParam_iPythonHexVersion, oData.iPythonHexVersion,
                             'Python version (hex)');
        # The pending command is only changeable in edit mode.
        if self._sMode == WuiFormContentBase.ksMode_Edit:
            oForm.addComboBox(TestBoxData.ksParam_enmPendingCmd,   oData.enmPendingCmd,   'Pending command',
                              TestBoxData.kaoTestBoxCmdDescs);
        else:
            oForm.addComboBoxRO(TestBoxData.ksParam_enmPendingCmd, oData.enmPendingCmd,   'Pending command',
                                TestBoxData.kaoTestBoxCmdDescs);
        if self._sMode != WuiFormContentBase.ksMode_Show:
            oForm.addSubmit('Create TestBox' if self._sMode == WuiFormContentBase.ksMode_Add else 'Change TestBox');

        return True;
class WuiTestBoxList(WuiListContentWithActionBase):
    """
    WUI TestBox List Content Generator.
    """

    ## Descriptors for the combo box.
    kasTestBoxActionDescs = \
    [ \
        [ 'none',    'Select an action...', '' ],
        [ 'enable',  'Enable',              '' ],
        [ 'disable', 'Disable',             '' ],
        TestBoxData.kaoTestBoxCmdDescs[1],
        TestBoxData.kaoTestBoxCmdDescs[2],
        TestBoxData.kaoTestBoxCmdDescs[3],
        TestBoxData.kaoTestBoxCmdDescs[4],
        TestBoxData.kaoTestBoxCmdDescs[5],
    ];

    def __init__(self, aoEntries, iPage, cItemsPerPage, tsEffective, fnDPrint, oDisp):
        # NOTE(review): sId = 'users' looks copy-pasted from the user list;
        # confirm whether it should read 'testboxes'.
        WuiListContentWithActionBase.__init__(self, aoEntries, iPage, cItemsPerPage, tsEffective,
                                              sTitle = 'TestBoxes', sId = 'users', fnDPrint = fnDPrint, oDisp = oDisp);
        # Column headers and per-column cell attributes; the two lists must
        # stay in lockstep (one attribute string per header).
        self._asColumnHeaders.extend([ 'Name', 'LOM', 'Status',
                                       'Cmd', 'Script', 'Python', 'Group',
                                       'OS', 'CPU', 'Features', 'CPUs', 'RAM', 'Scratch',
                                       'Actions' ]);
        self._asColumnAttribs.extend([ 'align="center"', 'align="center"', 'align="center"',
                                       'align="center"', 'align="center"', 'align="center"', 'align="center"',
                                       '', '', '', 'align="right"', 'align="right"', 'align="right"',
                                       'align="center"' ]);
        # Start from the static action list and append one migrate action per
        # scheduling group; also index the groups by id for _formatListEntry.
        self._aoActions = list(self.kasTestBoxActionDescs);
        self._aoSchedGroups = SchedGroupLogic(self._oDisp.getDb()).fetchOrderedByName();
        self._dSchedGroups = dict();
        for oSchedGroup in self._aoSchedGroups:
            self._aoActions.append([ 'setgroup-%u' % (oSchedGroup.idSchedGroup,),
                                     'Migrate to group %s (#%u)' % (oSchedGroup.sName, oSchedGroup.idSchedGroup,),
                                     oSchedGroup.sDescription ]);
            self._dSchedGroups[oSchedGroup.idSchedGroup] = oSchedGroup;
        self._sAction = oDisp.ksActionTestBoxListPost;
        self._sCheckboxName = TestBoxData.ksParam_idTestBox;
def _formatListEntry(self, iEntry): # pylint: disable=R0914
    """
    Renders one testbox row for the admin list view.

    Builds, per testbox: lights-out-management links, online/offline state
    and "last seen" info, a scheduling-group link, compacted OS and CPU
    descriptions, a feature summary string, and the applicable row actions.
    Returns the list of column values/widgets for the row.
    """
    from testmanager.webui.wuiadmin import WuiAdmin;
    oEntry = self._aoEntries[iEntry];

    # Lights-out management (ILOM/ELOM get clickable links, others plain text).
    if oEntry.enmLomKind == TestBoxData.ksLomKind_ILOM:
        aoLom = [ WuiLinkBase('ILOM', 'https://%s/' % (oEntry.ipLom,), fBracketed = False), ];
    elif oEntry.enmLomKind == TestBoxData.ksLomKind_ELOM:
        aoLom = [ WuiLinkBase('ELOM', 'http://%s/' % (oEntry.ipLom,), fBracketed = False), ];
    elif oEntry.enmLomKind == TestBoxData.ksLomKind_AppleXserveLom:
        aoLom = [ 'Apple LOM' ];
    elif oEntry.enmLomKind == TestBoxData.ksLomKind_None:
        aoLom = [ 'none' ];
    else:
        aoLom = [ 'Unexpected enmLomKind value "%s"' % (oEntry.enmLomKind,) ];
    if oEntry.ipLom is not None:
        if oEntry.enmLomKind in [ TestBoxData.ksLomKind_ILOM, TestBoxData.ksLomKind_ELOM ]:
            aoLom += [ WuiLinkBase('(ssh)', 'ssh://%s' % (oEntry.ipLom,), fBracketed = False) ];
        aoLom += [ WuiRawHtml('<br>'), '%s' % (oEntry.ipLom,) ];

    # State and last seen.
    if oEntry.oStatus is None:
        oSeen = WuiSpanText('tmspan-offline', 'Never');
        oState = '';
    else:
        oDelta = oEntry.tsCurrent - oEntry.oStatus.tsUpdated;
        if oDelta.days <= 0 and oDelta.seconds <= 15*60: # 15 mins and we consider you dead.
            oSeen = WuiSpanText('tmspan-online',  u'%s\u00a0s\u00a0ago' % (oDelta.days * 24 * 3600 + oDelta.seconds,));
        else:
            oSeen = WuiSpanText('tmspan-offline', u'%s' % (self.formatTsShort(oEntry.oStatus.tsUpdated),));

        if oEntry.oStatus.idTestSet is None:
            oState = str(oEntry.oStatus.enmState);
        else:
            # Currently running a test set: link the state to its result details.
            from testmanager.webui.wuimain import WuiMain;
            oState = WuiTmLink(oEntry.oStatus.enmState, WuiMain.ksScriptName,
                               { WuiMain.ksParamAction: WuiMain.ksActionTestResultDetails,
                                 TestSetData.ksParam_idTestSet: oEntry.oStatus.idTestSet, },
                               sTitle = '#%u' % (oEntry.oStatus.idTestSet,),
                               fBracketed = False);

    # Scheduling group link (falls back to the raw id if the group is unknown).
    oGroup = self._dSchedGroups.get(oEntry.idSchedGroup);
    oGroupLink = WuiTmLink(oGroup.sName if oGroup is not None else str(oEntry.idSchedGroup),
                           WuiAdmin.ksScriptName,
                           { WuiAdmin.ksParamAction: WuiAdmin.ksActionSchedGroupEdit,
                             SchedGroupData.ksParam_idSchedGroup: oEntry.idSchedGroup, },
                           sTitle = '#%u' % (oEntry.idSchedGroup,),
                           fBracketed = False);

    # Reformat the OS version to take less space.
    aoOs = [ 'N/A' ];
    if oEntry.sOs is not None and oEntry.sOsVersion is not None and oEntry.sCpuArch:
        sOsVersion = oEntry.sOsVersion;
        # Prefix bare numeric versions (e.g. "3.2.0") with 'v' for readability.
        if sOsVersion[0] not in [ 'v', 'V', 'r', 'R'] \
           and sOsVersion[0].isdigit() \
           and sOsVersion.find('.') in range(4) \
           and oEntry.sOs in [ 'linux', 'solaris', 'darwin', ]:
            sOsVersion = 'v' + sOsVersion;

        sVer1 = sOsVersion;
        sVer2 = None;
        if oEntry.sOs == 'linux':
            # Split "kernel / distro" and abbreviate the long RHEL name.
            iSep = sOsVersion.find(' / ');
            if iSep > 0:
                sVer1 = sOsVersion[:iSep].strip();
                sVer2 = sOsVersion[iSep + 3:].strip();
                sVer2 = sVer2.replace('Red Hat Enterprise Linux Server', 'RHEL');
        elif oEntry.sOs == 'solaris':
            # Split "version (build)" into two lines.
            iSep = sOsVersion.find(' (');
            if iSep > 0 and sOsVersion[-1] == ')':
                sVer1 = sOsVersion[:iSep].strip();
                sVer2 = sOsVersion[iSep + 2:-1].strip();
        # Non-breaking hyphens (u2011) keep version strings from wrapping.
        aoOs = [
            WuiSpanText('tmspan-osarch', u'%s.%s' % (oEntry.sOs, oEntry.sCpuArch,)),
            WuiSpanText('tmspan-osver1', sVer1.replace('-', u'\u2011'),),
        ];
        if sVer2 is not None:
            aoOs += [ WuiRawHtml('<br>'), WuiSpanText('tmspan-osver2', sVer2.replace('-', u'\u2011')), ];

    # Format the CPU revision.
    oCpu = None;
    if oEntry.lCpuRevision is not None and oEntry.sCpuVendor is not None and oEntry.sCpuName is not None:
        oCpu = [
            u'%s (fam:%xh\u00a0m:%xh\u00a0s:%xh)'
            % (oEntry.sCpuVendor, oEntry.getCpuFamily(), oEntry.getCpuModel(), oEntry.getCpuStepping(),),
            WuiRawHtml('<br>'),
            oEntry.sCpuName,
        ];
    else:
        # Partial information only: show whatever fields are available.
        oCpu = [];
        if oEntry.sCpuVendor is not None:
            oCpu.append(oEntry.sCpuVendor);
        if oEntry.lCpuRevision is not None:
            oCpu.append('%#x' % (oEntry.lCpuRevision,));
        if oEntry.sCpuName is not None:
            oCpu.append(oEntry.sCpuName);

    # Stuff CPU and box feature flags into one field (non-breaking hyphens).
    asFeatures = []
    if oEntry.fCpuHwVirt is True: asFeatures.append(u'HW\u2011Virt');
    if oEntry.fCpuNestedPaging is True: asFeatures.append(u'Nested\u2011Paging');
    if oEntry.fCpu64BitGuest is True: asFeatures.append(u'64\u2011bit\u2011Guest');
    if oEntry.fChipsetIoMmu is True: asFeatures.append(u'I/O\u2011MMU');
    sFeatures = u' '.join(asFeatures) if len(asFeatures) > 0 else u'';

    # Collect the applicable actions.
    aoActions = [
        WuiTmLink('Details', WuiAdmin.ksScriptName,
                  { WuiAdmin.ksParamAction: WuiAdmin.ksActionTestBoxDetails,
                    TestBoxData.ksParam_idTestBox: oEntry.idTestBox,
                    WuiAdmin.ksParamEffectiveDate: self._tsEffectiveDate, } ),
    ]

    # Edit/Remove only make sense for the current (non-expired) row version.
    if isDbTimestampInfinity(oEntry.tsExpire):
        aoActions += [
            WuiTmLink('Edit', WuiAdmin.ksScriptName,
                      { WuiAdmin.ksParamAction: WuiAdmin.ksActionTestBoxEdit,
                        TestBoxData.ksParam_idTestBox: oEntry.idTestBox, } ),
            WuiTmLink('Remove', WuiAdmin.ksScriptName,
                      { WuiAdmin.ksParamAction: WuiAdmin.ksActionTestBoxRemovePost,
                        TestBoxData.ksParam_idTestBox: oEntry.idTestBox },
                      sConfirm = 'Are you sure that you want to remove %s (%s)?' % (oEntry.sName, oEntry.ip) ),
        ]
    if oEntry.sOs not in [ 'win', 'os2', ] and oEntry.ip is not None:
        aoActions.append(WuiLinkBase('ssh', 'ssh://vbox@%s' % (oEntry.ip,),));

    return [ self._getCheckBoxColumn(iEntry, oEntry.idTestBox),
             [ WuiSpanText('tmspan-name', oEntry.sName), WuiRawHtml('<br>'), '%s' % (oEntry.ip,),],
             aoLom,
             [
                 '' if oEntry.fEnabled else 'disabled / ',
                 oState,
                 WuiRawHtml('<br>'),
                 oSeen,
             ],
             oEntry.enmPendingCmd,
             WuiSvnLink(oEntry.iTestBoxScriptRev),
             oEntry.formatPythonVersion(),
             oGroupLink,
             aoOs,
             oCpu,
             sFeatures,
             oEntry.cCpus if oEntry.cCpus is not None else 'N/A',
             utils.formatNumberNbsp(oEntry.cMbMemory) + u'\u00a0MB' if oEntry.cMbMemory is not None else 'N/A',
             utils.formatNumberNbsp(oEntry.cMbScratch) + u'\u00a0MB' if oEntry.cMbScratch is not None else 'N/A',
             aoActions,
           ];
| gpl-2.0 |
Makki1/old-svn | avr/sketchbook/GiraRM_Debug/freebus/freebus_ets/software/freebus-ets/src/GUI/FB_ProgramFrame.py | 1 | 10920 | #!/usr/bin/
#-*- coding: iso-8859-1 -*-
#===============================================================================
# __________ ________________ __ _______
# / ____/ __ \/ ____/ ____/ __ )/ / / / ___/
# / /_ / /_/ / __/ / __/ / __ / / / /\__ \
# / __/ / _, _/ /___/ /___/ /_/ / /_/ /___/ /
# /_/ /_/ |_/_____/_____/_____/\____//____/
#
#Source File: FB_ProgramFrame.py
#Version: V0.1 , 29.08.2009
#Author: Jerome Leisner
#email: j.leisner@ing-automation.de
#===============================================================================
import os
import sys
import time
#import thread
#import Queue
#import threading
#import thread
import pygtk
pygtk.require("2.0")
import gtk
import gtk.glade
import pickle
import jpype
import thread
from Global import Global
from GUI import FB_DlgConnectionManager
class FB_ProgramFrame(object):
    """Programming window of the Freebus ETS tool.

    Lets the user select an EIB/KNX connection, open/close it, and scan the
    bus for devices that are currently in programming mode (via jpype-wrapped
    Calimero management calls).  UI is built from a Glade description.
    """
    __curProject = None             #project object
    __cbConnections = None          #widget combo connections
    __bConnect = None               #widget connect button
    __parentClass = None            #object of its own class
    __curConnectionInstance = None  #instance of the current connection (FB_EIBConnection)

    #Devices in programming mode
    __ListViewProgDevices = None    #widget Tree/Listview to show devices in programming mode
    __CheckTimer = None             #timer object for checking devices cyclically
    __toggleCheckProgDevices = None

    def __init__(self,curProject):
        """Builds the window from the Glade file, wires signal handlers and
        populates the connection combobox from the project database."""
        self.__parentClass = self
        self.__curProject = curProject

        GladeObj = gtk.glade.XML(Global.GUIPath + Global.GladeFile,"winProgramming")
        # Map Glade signal names to handler methods.
        dic = { "on_bConnectionConfig_clicked":self.ShowConnectionManager ,
                "on_bTestConnection_clicked":self.ClickTestConnection,
                "on_bConnect_toggled":self.ToggleConnect,
                "on_cbConnections_changed":self.ConnectionsChanged,
                "on_toggleCheckProgDevices_toggled":self.ToggleCheckProgDevices,
              }
        GladeObj.signal_autoconnect(dic)

        #read widgets
        self.__cbConnections = GladeObj.get_widget("cbConnections")
        self.__bConnect = GladeObj.get_widget("bConnect")
        self.__ListViewProgDevices = GladeObj.get_widget("ListViewProgDevices")
        self.__toggleCheckProgDevices = GladeObj.get_widget("toggleCheckProgDevices")

        #init model combobox to show connections
        liststore = gtk.ListStore(str,str) #column 0: display text, column 1: connection GUID
        self.__cbConnections.set_model(liststore)
        self.text_cell = gtk.CellRendererText()
        self.__cbConnections.pack_start(self.text_cell,True)
        self.__cbConnections.add_attribute(self.text_cell, "text", 0)

        #init model tree/listview to show devices in programming mode
        liststore = gtk.ListStore(gtk.gdk.Pixbuf, str)
        self.__ListViewProgDevices.set_model(liststore)
        self.text_cell = gtk.CellRendererText()  #Text Object
        self.img_cell = gtk.CellRendererPixbuf() #Image Object
        self.column = gtk.TreeViewColumn()
        self.column.pack_start(self.img_cell, False)
        self.column.pack_start(self.text_cell,True)
        self.column.add_attribute(self.img_cell, "pixbuf",0)
        self.column.add_attribute(self.text_cell, "text", 1)
        self.column.set_attributes(self.text_cell, markup=1)
        self.__ListViewProgDevices.append_column(self.column)

        #init timer to check devices in progmode (currently disabled)
        #self.__CheckTimer = threading.Timer(5.0, self.ReadDevicesInProgMode)

        self.LoadConnectionFromDB()
        self.UpdateUserConnections()

        winProgramming = GladeObj.get_widget("winProgramming")
        winProgramming.show()

    #Dialog: Connection-Manager
    def ShowConnectionManager(self,widget, data=None):
        """Opens the connection manager dialog for the current project."""
        FB_DlgConnectionManager.FB_DlgConnectionManager(self.__curProject, self.__parentClass)

    #button: Test-Connection
    #open the current connection and test it... (not implemented yet)
    def ClickTestConnection(self,widget, data=None):
        pass

    def ToggleConnect(self,widget, data=None):
        """Connect/disconnect the currently selected connection, following
        the toggle state of the Connect button."""
        model = self.__cbConnections.get_model()
        iter = self.__cbConnections.get_active_iter()
        id = model.get_value(iter,1)
        self.__curConnectionInstance = self.getEIBConnection(id)
        if widget.get_active() == True:
            #connect
            self.__curConnectionInstance.doConnect()
        else:
            #disconnect
            self.__curConnectionInstance.doDisconnect()
        self.SetConnectButtonState(widget)

    #callback: combobox selection changed
    def ConnectionsChanged(self,widget, data=None):
        #disconnect in case of changing the connection
        if self.__curConnectionInstance <> None:
            self.__curConnectionInstance.doDisconnect()
            self.SetConnectButtonState(self.__bConnect)

    def SetConnectButtonState(self,widget):
        """Syncs the Connect toggle button (state + label) with the actual
        connection state.  Labels are user-facing German strings."""
        if self.__curConnectionInstance.isConnected() == True:
            widget.set_active(True)
            widget.set_label("Verbunden")
        else:
            widget.set_active(False)
            widget.set_label("Verbinden")

    #gets the instance of a FB_EIBConnection with the given id
    def getEIBConnection(self,id):
        """Returns the FB_EIBConnection with the given GUID, or None."""
        RValue = None
        if self.__curProject <> None:
            if self.__curProject.eibConnectionList <> None:
                for i in range(len(self.__curProject.eibConnectionList)):
                    if id == self.__curProject.eibConnectionList[i].getID():
                        RValue = self.__curProject.eibConnectionList[i]
                        break
        return RValue

    ##function to update the combobox in parent frame to show/select for the user
    #@param cbConnections: widget of the combobox in parent frame which should be loaded
    def UpdateUserConnections(self):
        try:
            #copy list into combo connections in program frame (parent)
            if(self.__curProject <> None):# and self._MyConnection <> None):
                model = self.__cbConnections.get_model()
                #save id of the currently selected connection
                curIter = self.__cbConnections.get_active_iter()
                if curIter <> None:
                    idsaved = model.get_value(curIter,1) #column 1 = id
                else:
                    idsaved = 0
                model.clear()
                IterSaved = None #iterator of the previously selected entry, if still present
                for i in range(len(self.__curProject.eibConnectionList)):
                    Name = self.__curProject.eibConnectionList[i].getName()
                    typeID = self.__curProject.eibConnectionList[i].getType()
                    Type = str(Global.ConTypesText[typeID])
                    id = self.__curProject.eibConnectionList[i].getID()
                    tmp = Name + " mit '" + Type + "'"
                    iter = model.append([tmp, id])
                    #check whether the saved id is still in the list; if so re-select it
                    if idsaved == id:
                        IterSaved = iter
                #connection still existing...
                if IterSaved <> None:
                    self.__cbConnections.set_active_iter(IterSaved)
                else:
                    if len(self.__curProject.eibConnectionList) > 0:
                        self.__cbConnections.set_active(0)
            else:
                #no connections in list or no valid project is loaded
                model = self.__cbConnections.get_model()
                model.clear()
        except:
            # NOTE(review): broad except silently hides GUI/model errors;
            # kept as-is to preserve behaviour.
            pass

    def LoadConnectionFromDB(self):
        """Reloads the project's connection list from the Connections table
        (column 2 holds a pickled FB_EIBConnection)."""
        #try:
        cursor = Global.DatabaseConnection.cursor()
        cursor.execute("SELECT * FROM Connections")
        del self.__curProject.eibConnectionList[0:len(self.__curProject.eibConnectionList)]
        for row in cursor:
            tmpCon = pickle.loads(row[2]) #column 2 contains class data
            self.__curProject.eibConnectionList.append(tmpCon)
        #except:
        #    pass

    #---------------------------------------------------------------------------------------------------------
    #---------------------------------------------------------------------------------------------------------
    ##button to start reading devices in programming mode
    ##
    def ToggleCheckProgDevices(self,widget,Data=None):
        """Starts/stops the scan for devices in programming mode.  The cyclic
        timer variant is commented out; currently a single scan is done."""
        if widget.get_active() == True:
            widget.set_label("zyklischer Suchlauf...")
            self.ReadDevicesInProgMode()
            #self.__CheckTimer.start()
        else:
            widget.set_label("Suchlauf starten")
            #self.__CheckTimer.cancel()

    #---------------------------------------------------------------------------------------------------------
    #---------------------------------------------------------------------------------------------------------
    #section physical addresses
    def ReadDevicesInProgMode(self):
        #read the physical addresses of devices in programming mode
        try:
            mngClient = Global.ManagementClientImpl(self.__curConnectionInstance.getKNXNetworkLink())
            IndivAddrList = mngClient.readAddress(False)
            model = self.__ListViewProgDevices.get_model()
            model.clear()
            image=gtk.gdk.pixbuf_new_from_file(Global.ImagePath + "Device.png")
            for Addr in IndivAddrList:
                Iterator = model.append([image,Addr.toString()])
        except jpype.JavaException, ex :
            # Map the Java-side KNX exceptions to user-facing (German) messages.
            error = ""
            if jpype.JavaException.javaClass(ex) is Global.KNXTimeoutException:
                error = U"keine Geräte im Programmiermodus : " + str(jpype.JavaException.message(ex))
            elif jpype.JavaException.javaClass(ex) is Global.KNXInvalidResponseException :
                error = U"ungültige Antwort beim Lesen der Addressen : " + str(jpype.JavaException.message(ex))
            elif jpype.JavaException.javaClass(ex) is Global.KNXLinkClosedException:
                error = U"kein geöffneter Netzwerk-Link : " + str(jpype.JavaException.message(ex))
            elif jpype.JavaException.javaClass(ex) is Global.KNXRemoteException:
                error = U"Fehler beim Remote-Server : " + str(jpype.JavaException.message(ex))

            msgbox = gtk.MessageDialog(parent = None, buttons = gtk.BUTTONS_OK,
                                       flags = gtk.DIALOG_MODAL, type = gtk.MESSAGE_ERROR,
                                       message_format = error )
            msgbox.set_title(Global.ERRORCONNECTIONTITLE)
            #result = msgbox.run()
            #msgbox.destroy()
| gpl-3.0 |
yury-s/v8-inspector | Source/chrome/tools/telemetry/telemetry/core/platform/tracing_category_filter_unittest.py | 54 | 5954 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import unittest
from telemetry.core.platform import tracing_category_filter
class TracingCategoryFilterTests(unittest.TestCase):
  """Checks parsing of a combined category-filter specification string."""

  def testBasic(self):
    spec = 'x,-y,disabled-by-default-z,DELAY(7;foo)'
    f = tracing_category_filter.TracingCategoryFilter(spec)

    # Each clause of the spec lands in the matching bucket.
    self.assertEquals(set(f.included_categories), set(['x']))
    self.assertEquals(set(f.excluded_categories), set(['y']))
    self.assertEquals(set(f.disabled_by_default_categories),
                      set(['disabled-by-default-z']))
    self.assertEquals(set(f.synthetic_delays), set(['DELAY(7;foo)']))

    # The raw filter string keeps the included category; the stable form
    # has a canonical ordering.
    self.assertTrue('x' in f.filter_string)
    self.assertEquals(f.stable_filter_string,
                      'x,disabled-by-default-z,-y,DELAY(7;foo)')
class CategoryFilterTest(unittest.TestCase):
  """Exercises include/exclude bookkeeping and the IsSubset() relation."""

  def _MakeFilter(self, spec=None):
    # Helper: build a filter from a spec string, or an empty filter for None.
    if spec is None:
      return tracing_category_filter.TracingCategoryFilter()
    return tracing_category_filter.TracingCategoryFilter(spec)

  def testAddIncludedCategory(self):
    f = self._MakeFilter()
    for name in ('foo', 'bar', 'foo'):
      f.AddIncludedCategory(name)
    self.assertEquals('bar,foo', f.stable_filter_string)

  def testAddExcludedCategory(self):
    f = self._MakeFilter()
    for name in ('foo', 'bar', 'foo'):
      f.AddExcludedCategory(name)
    self.assertEquals('-bar,-foo', f.stable_filter_string)

  def testIncludeAndExcludeCategoryRaisesAssertion(self):
    f = self._MakeFilter()
    f.AddIncludedCategory('foo')
    self.assertRaises(AssertionError, f.AddExcludedCategory, 'foo')

    f = self._MakeFilter()
    f.AddExcludedCategory('foo')
    self.assertRaises(AssertionError, f.AddIncludedCategory, 'foo')

    self.assertRaises(AssertionError,
                      tracing_category_filter.TracingCategoryFilter, 'foo,-foo')
    self.assertRaises(AssertionError,
                      tracing_category_filter.TracingCategoryFilter, '-foo,foo')

  def testIsSubset(self):
    # (a spec, b spec, expected a.IsSubset(b)); a None spec means an empty
    # filter.  IsSubset() may return None for "can't tell" (wildcards etc.).
    cases = (
        (None, None, True),
        ('test1,test2', None, True),
        ('-test1,-test2', None, True),
        (None, 'test1,test2', None),
        ('test*', None, None),
        (None, 'test?', None),
        ('test1,test2', 'test1', False),
        ('test1', '-test1', False),
        ('test2,test1', 'test1,test2', True),
        ('-test2', '-test1,-test2', False),
        ('disabled-by-default-test1,disabled-by-default-test2',
         'disabled-by-default-test1', False),
        ('disabled-by-default-test2', 'disabled-by-default-test1', False),
    )
    for a_spec, b_spec, expected in cases:
      a = self._MakeFilter(a_spec)
      b = self._MakeFilter(b_spec)
      self.assertEquals(expected, a.IsSubset(b))

  def testIsSubsetWithSyntheticDelays(self):
    # Same shape as testIsSubset: delays must match exactly (value and mode),
    # but their ordering within the spec is irrelevant.
    cases = (
        ('DELAY(foo;0.016)', 'DELAY(foo;0.016)', True),
        (None, 'DELAY(foo;0.016)', True),
        ('DELAY(foo;0.016)', None, False),
        ('DELAY(foo;0.032)', 'DELAY(foo;0.016)', False),
        ('DELAY(foo;0.016;oneshot)', 'DELAY(foo;0.016;static)', False),
        ('DELAY(bar;0.1),DELAY(foo;0.016)',
         'DELAY(foo;0.016),DELAY(bar;0.1)', True),
        ('DELAY(bar;0.1)', 'DELAY(foo;0.016),DELAY(bar;0.1)', True),
        ('DELAY(foo;0.032),DELAY(bar;0.1)',
         'DELAY(foo;0.016),DELAY(bar;0.1)', False),
    )
    for a_spec, b_spec, expected in cases:
      a = self._MakeFilter(a_spec)
      b = self._MakeFilter(b_spec)
      self.assertEquals(expected, a.IsSubset(b))
| bsd-3-clause |
lombritz/odoo | addons/sale_margin/__init__.py | 441 | 1042 | ##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import sale_margin
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
gsmaxwell/phase_offset_rx | gnuradio-core/src/examples/pfb/fmtest.py | 17 | 7785 | #!/usr/bin/env python
#
# Copyright 2009 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr, blks2
import sys, math, time
try:
import scipy
from scipy import fftpack
except ImportError:
print "Error: Program requires scipy (see: www.scipy.org)."
sys.exit(1)
try:
import pylab
except ImportError:
print "Error: Program requires matplotlib (see: matplotlib.sourceforge.net)."
sys.exit(1)
class fmtx(gr.hier_block2):
    """Hierarchical block: NBFM-modulates a float audio stream and mixes it
    up to lo_freq, producing a complex signal at if_rate."""
    def __init__(self, lo_freq, audio_rate, if_rate):
        gr.hier_block2.__init__(self, "build_fm",
                                gr.io_signature(1, 1, gr.sizeof_float),      # Input signature
                                gr.io_signature(1, 1, gr.sizeof_gr_complex)) # Output signature

        fmtx = blks2.nbfm_tx (audio_rate, if_rate, max_dev=5e3, tau=75e-6)

        # Local oscillator: shifts the modulated signal to its channel offset.
        lo = gr.sig_source_c (if_rate,        # sample rate
                              gr.GR_SIN_WAVE, # waveform type
                              lo_freq,        # frequency
                              1.0,            # amplitude
                              0)              # DC Offset

        mixer = gr.multiply_cc ()

        self.connect (self, fmtx, (mixer, 0))
        self.connect (lo, (mixer, 1))
        self.connect (mixer, self)
class fmtest(gr.top_block):
    """Flowgraph: N FM channels summed onto one IF, pushed through a noisy
    channel, then split back apart by an M-channel polyphase filterbank with
    an NBFM receiver + squelch + sink per output channel."""
    def __init__(self):
        gr.top_block.__init__(self)

        self._nsamples = 1000000
        self._audio_rate = 8000

        # Set up N channels with their own baseband and IF frequencies.
        self._N = 5
        chspacing = 16000
        freq = [10, 20, 30, 40, 50]
        f_lo = [0, 1*chspacing, -1*chspacing, 2*chspacing, -2*chspacing]

        self._if_rate = 4*self._N*self._audio_rate

        # Create a signal source per channel and frequency modulate it.
        self.sum = gr.add_cc ()
        for n in xrange(self._N):
            sig = gr.sig_source_f(self._audio_rate, gr.GR_SIN_WAVE, freq[n], 0.5)
            fm = fmtx(f_lo[n], self._audio_rate, self._if_rate)
            self.connect(sig, fm)
            self.connect(fm, (self.sum, n))

        self.head = gr.head(gr.sizeof_gr_complex, self._nsamples)
        self.snk_tx = gr.vector_sink_c()
        self.channel = blks2.channel_model(0.1)

        self.connect(self.sum, self.head, self.channel, self.snk_tx)

        # Design the channelizer (low-pass prototype for the filterbank).
        self._M = 10
        bw = chspacing/2.0
        t_bw = chspacing/10.0
        self._chan_rate = self._if_rate / self._M
        self._taps = gr.firdes.low_pass_2(1, self._if_rate, bw, t_bw,
                                          attenuation_dB=100,
                                          window=gr.firdes.WIN_BLACKMAN_hARRIS)
        tpc = math.ceil(float(len(self._taps)) / float(self._M))

        print "Number of taps: ", len(self._taps)
        print "Number of channels: ", self._M
        print "Taps per channel: ", tpc

        self.pfb = blks2.pfb_channelizer_ccf(self._M, self._taps)

        self.connect(self.channel, self.pfb)

        # Per filterbank output: NBFM demod -> squelch -> vector sink.
        self.fmdet = list()
        self.squelch = list()
        self.snks = list()
        for i in xrange(self._M):
            self.fmdet.append(blks2.nbfm_rx(self._audio_rate, self._chan_rate))
            self.squelch.append(blks2.standard_squelch(self._audio_rate*10))
            self.snks.append(gr.vector_sink_f())
            self.connect((self.pfb, i), self.fmdet[i], self.squelch[i], self.snks[i])

    def num_tx_channels(self):
        # Number of transmitted (summed) FM channels.
        return self._N

    def num_rx_channels(self):
        # Number of filterbank output channels.
        return self._M
def main():
    """Runs the fmtest flowgraph, then plots the transmitted spectrum/time
    signal (figure 1) and each channelizer output's spectrum (figure 2) and
    time signal (figure 3)."""
    fm = fmtest()

    tstart = time.time()
    fm.run()
    tend = time.time()
    # NOTE(review): tstart/tend are captured but never reported.

    if 1:
        fig1 = pylab.figure(1, figsize=(12,10), facecolor="w")
        fig2 = pylab.figure(2, figsize=(12,10), facecolor="w")
        fig3 = pylab.figure(3, figsize=(12,10), facecolor="w")

        # Sample window used for all plots (skips start-up transients).
        Ns = 10000
        Ne = 100000

        fftlen = 8192
        winfunc = scipy.blackman

        # Plot transmitted signal.
        fs = fm._if_rate

        d = fm.snk_tx.data()[Ns:Ns+Ne]
        sp1_f = fig1.add_subplot(2, 1, 1)

        X,freq = sp1_f.psd(d, NFFT=fftlen, noverlap=fftlen/4, Fs=fs,
                           window = lambda d: d*winfunc(fftlen),
                           visible=False)
        X_in = 10.0*scipy.log10(abs(fftpack.fftshift(X)))
        f_in = scipy.arange(-fs/2.0, fs/2.0, fs/float(X_in.size))
        p1_f = sp1_f.plot(f_in, X_in, "b")
        sp1_f.set_xlim([min(f_in), max(f_in)+1])
        sp1_f.set_ylim([-120.0, 20.0])

        sp1_f.set_title("Input Signal", weight="bold")
        sp1_f.set_xlabel("Frequency (Hz)")
        sp1_f.set_ylabel("Power (dBW)")

        Ts = 1.0/fs
        Tmax = len(d)*Ts

        t_in = scipy.arange(0, Tmax, Ts)
        x_in = scipy.array(d)
        sp1_t = fig1.add_subplot(2, 1, 2)
        p1_t = sp1_t.plot(t_in, x_in.real, "b-o")
        #p1_t = sp1_t.plot(t_in, x_in.imag, "r-o")
        sp1_t.set_ylim([-5, 5])

        # Set up the number of rows and columns for plotting the subfigures.
        Ncols = int(scipy.floor(scipy.sqrt(fm.num_rx_channels())))
        Nrows = int(scipy.floor(fm.num_rx_channels() / Ncols))
        if(fm.num_rx_channels() % Ncols != 0):
            Nrows += 1

        # Plot each of the channels outputs. Frequencies on Figure 2 and
        # time signals on Figure 3.
        fs_o = fm._audio_rate
        for i in xrange(len(fm.snks)):
            # remove issues with the transients at the beginning
            # also remove some corruption at the end of the stream
            # this is a bug, probably due to the corner cases
            d = fm.snks[i].data()[Ns:Ne]

            sp2_f = fig2.add_subplot(Nrows, Ncols, 1+i)
            X,freq = sp2_f.psd(d, NFFT=fftlen, noverlap=fftlen/4, Fs=fs_o,
                               window = lambda d: d*winfunc(fftlen),
                               visible=False)
            #X_o = 10.0*scipy.log10(abs(fftpack.fftshift(X)))
            X_o = 10.0*scipy.log10(abs(X))
            #f_o = scipy.arange(-fs_o/2.0, fs_o/2.0, fs_o/float(X_o.size))
            f_o = scipy.arange(0, fs_o/2.0, fs_o/2.0/float(X_o.size))
            p2_f = sp2_f.plot(f_o, X_o, "b")
            sp2_f.set_xlim([min(f_o), max(f_o)+0.1])
            sp2_f.set_ylim([-120.0, 20.0])
            sp2_f.grid(True)

            sp2_f.set_title(("Channel %d" % i), weight="bold")
            sp2_f.set_xlabel("Frequency (kHz)")
            sp2_f.set_ylabel("Power (dBW)")

            Ts = 1.0/fs_o
            Tmax = len(d)*Ts
            t_o = scipy.arange(0, Tmax, Ts)

            x_t = scipy.array(d)
            sp2_t = fig3.add_subplot(Nrows, Ncols, 1+i)
            p2_t = sp2_t.plot(t_o, x_t.real, "b")
            p2_t = sp2_t.plot(t_o, x_t.imag, "r")
            sp2_t.set_xlim([min(t_o), max(t_o)+1])
            sp2_t.set_ylim([-1, 1])

            sp2_t.set_xlabel("Time (s)")
            sp2_t.set_ylabel("Amplitude")

        pylab.show()
if __name__ == "__main__":
main()
| gpl-3.0 |
iTagir/kubernetes | translations/extract.py | 65 | 2987 | #!/usr/bin/env python
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Extract strings from command files and externalize into translation files.
Expects to be run from the root directory of the repository.
Usage:
extract.py pkg/kubectl/cmd/apply.go
"""
import fileinput
import sys
import re
class MatchHandler(object):
    """Bundles a compiled regular expression with the action to take on a hit.

    Attributes:
        regex: compiled pattern tried against each input line.
        replace_fn: callable invoked as replace_fn(match, file, line_number)
            when the pattern matches.
    """
    def __init__(self, regex, replace_fn):
        self.replace_fn = replace_fn
        self.regex = re.compile(regex)
def short_replace(match, file, line_number):
    """Rewrites a cobra `Short: "..."` description to wrap it in i18n.T().

    group(1) is the indented `Short: ` prefix, group(2) the quoted string
    (quotes included); the rewritten line goes to stdout.
    """
    prefix, quoted = match.group(1), match.group(2)
    sys.stdout.write('%si18n.T(%s),\n' % (prefix, quoted))
SHORT_MATCH = MatchHandler(r'(\s+Short:\s+)("[^"]+"),', short_replace)
def import_replace(match, file, line_number):
    """Emits the matched cmd/util import line followed by the i18n import.

    Deliberately does not check whether the i18n import is already present;
    a later goimports/gofmt pass is expected to clean up duplicates and
    formatting.
    """
    sys.stdout.write('%s\n"k8s.io/kubernetes/pkg/util/i18n"\n' % (match.group(1),))
IMPORT_MATCH = MatchHandler('(.*"k8s.io/kubernetes/pkg/kubectl/cmd/util")', import_replace)
def string_flag_replace(match, file, line_number):
    """Replace a cmd.Flags().String("...", "", "...") with an internationalization.

    group(1) is everything up to (and including) the comma before the usage
    string; group(2) is the unquoted usage string.  Emits
        <prefix>i18n.T("<usage>"))
    BUG FIX: the previous format string was '{}i18n.T("{})"))' which
    transposed the closing quote and parenthesis, producing invalid Go such
    as `i18n.T("Output format.)"))`.
    """
    sys.stdout.write('{}i18n.T("{}"))\n'.format(match.group(1), match.group(2)))
STRING_FLAG_MATCH = MatchHandler('(\s+cmd\.Flags\(\).String\("[^"]*", "[^"]*", )"([^"]*)"\)', string_flag_replace)
def replace(filename, matchers):
    """Given a file and a set of matchers, run those matchers
    across the file and replace it with the results.
    """
    # Run all the matchers.  fileinput with inplace=True redirects stdout
    # into the file being rewritten, so everything written below (by the
    # matchers or the fall-through branch) becomes the new file content.
    line_number = 0
    for line in fileinput.input(filename, inplace=True):
        line_number += 1
        matched = False
        for matcher in matchers:
            match = matcher.regex.match(line)
            if match:
                # First matcher wins; it is responsible for emitting the
                # replacement line(s).
                matcher.replace_fn(match, filename, line_number)
                matched = True
                break
        if not matched:
            # No matcher claimed the line: pass it through unchanged.
            sys.stdout.write(line)
    sys.stdout.flush()

    # gofmt the file again (goimports also fixes up the added import).
    from subprocess import call
    call(["goimports", "-w", filename])
replace(sys.argv[1], [SHORT_MATCH, IMPORT_MATCH, STRING_FLAG_MATCH])
| apache-2.0 |
cristiana214/cristianachavez214-cristianachavez | python/src/Lib/test/test_binhex.py | 192 | 1027 | #! /usr/bin/env python
"""Test script for the binhex C module
Uses the mechanism of the python binhex module
Based on an original test by Roger E. Masse.
"""
import binhex
import os
import unittest
from test import test_support
class BinHexTestCase(unittest.TestCase):
    """Round-trips a small payload through the binhex encoder and decoder,
    using two scratch files derived from TESTFN."""

    DATA = 'Jack is my hero'

    def setUp(self):
        self.fname1 = test_support.TESTFN + "1"
        self.fname2 = test_support.TESTFN + "2"

    def tearDown(self):
        # Best-effort cleanup of both scratch files.
        for fname in (self.fname1, self.fname2):
            try:
                os.unlink(fname)
            except OSError:
                pass

    def test_binhex(self):
        # Write the payload, encode file1 -> file2, decode back into file1,
        # and check the payload survived the round trip.
        f = open(self.fname1, 'w')
        f.write(self.DATA)
        f.close()

        binhex.binhex(self.fname1, self.fname2)
        binhex.hexbin(self.fname2, self.fname1)

        f = open(self.fname1, 'r')
        finish = f.readline()
        f.close()

        self.assertEqual(self.DATA, finish)
def test_main():
    # Standard regrtest entry point: run the suite via test_support.
    test_support.run_unittest(BinHexTestCase)
if __name__ == "__main__":
test_main()
| apache-2.0 |
CristianBB/SickRage | lib/unidecode/x084.py | 252 | 4646 | data = (
'Hu ', # 0x00
'Qi ', # 0x01
'He ', # 0x02
'Cui ', # 0x03
'Tao ', # 0x04
'Chun ', # 0x05
'Bei ', # 0x06
'Chang ', # 0x07
'Huan ', # 0x08
'Fei ', # 0x09
'Lai ', # 0x0a
'Qi ', # 0x0b
'Meng ', # 0x0c
'Ping ', # 0x0d
'Wei ', # 0x0e
'Dan ', # 0x0f
'Sha ', # 0x10
'Huan ', # 0x11
'Yan ', # 0x12
'Yi ', # 0x13
'Tiao ', # 0x14
'Qi ', # 0x15
'Wan ', # 0x16
'Ce ', # 0x17
'Nai ', # 0x18
'Kutabireru ', # 0x19
'Tuo ', # 0x1a
'Jiu ', # 0x1b
'Tie ', # 0x1c
'Luo ', # 0x1d
'[?] ', # 0x1e
'[?] ', # 0x1f
'Meng ', # 0x20
'[?] ', # 0x21
'Yaji ', # 0x22
'[?] ', # 0x23
'Ying ', # 0x24
'Ying ', # 0x25
'Ying ', # 0x26
'Xiao ', # 0x27
'Sa ', # 0x28
'Qiu ', # 0x29
'Ke ', # 0x2a
'Xiang ', # 0x2b
'Wan ', # 0x2c
'Yu ', # 0x2d
'Yu ', # 0x2e
'Fu ', # 0x2f
'Lian ', # 0x30
'Xuan ', # 0x31
'Yuan ', # 0x32
'Nan ', # 0x33
'Ze ', # 0x34
'Wo ', # 0x35
'Chun ', # 0x36
'Xiao ', # 0x37
'Yu ', # 0x38
'Pian ', # 0x39
'Mao ', # 0x3a
'An ', # 0x3b
'E ', # 0x3c
'Luo ', # 0x3d
'Ying ', # 0x3e
'Huo ', # 0x3f
'Gua ', # 0x40
'Jiang ', # 0x41
'Mian ', # 0x42
'Zuo ', # 0x43
'Zuo ', # 0x44
'Ju ', # 0x45
'Bao ', # 0x46
'Rou ', # 0x47
'Xi ', # 0x48
'Xie ', # 0x49
'An ', # 0x4a
'Qu ', # 0x4b
'Jian ', # 0x4c
'Fu ', # 0x4d
'Lu ', # 0x4e
'Jing ', # 0x4f
'Pen ', # 0x50
'Feng ', # 0x51
'Hong ', # 0x52
'Hong ', # 0x53
'Hou ', # 0x54
'Yan ', # 0x55
'Tu ', # 0x56
'Zhu ', # 0x57
'Zi ', # 0x58
'Xiang ', # 0x59
'Shen ', # 0x5a
'Ge ', # 0x5b
'Jie ', # 0x5c
'Jing ', # 0x5d
'Mi ', # 0x5e
'Huang ', # 0x5f
'Shen ', # 0x60
'Pu ', # 0x61
'Gai ', # 0x62
'Dong ', # 0x63
'Zhou ', # 0x64
'Qian ', # 0x65
'Wei ', # 0x66
'Bo ', # 0x67
'Wei ', # 0x68
'Pa ', # 0x69
'Ji ', # 0x6a
'Hu ', # 0x6b
'Zang ', # 0x6c
'Jia ', # 0x6d
'Duan ', # 0x6e
'Yao ', # 0x6f
'Jun ', # 0x70
'Cong ', # 0x71
'Quan ', # 0x72
'Wei ', # 0x73
'Xian ', # 0x74
'Kui ', # 0x75
'Ting ', # 0x76
'Hun ', # 0x77
'Xi ', # 0x78
'Shi ', # 0x79
'Qi ', # 0x7a
'Lan ', # 0x7b
'Zong ', # 0x7c
'Yao ', # 0x7d
'Yuan ', # 0x7e
'Mei ', # 0x7f
'Yun ', # 0x80
'Shu ', # 0x81
'Di ', # 0x82
'Zhuan ', # 0x83
'Guan ', # 0x84
'Sukumo ', # 0x85
'Xue ', # 0x86
'Chan ', # 0x87
'Kai ', # 0x88
'Kui ', # 0x89
'[?] ', # 0x8a
'Jiang ', # 0x8b
'Lou ', # 0x8c
'Wei ', # 0x8d
'Pai ', # 0x8e
'[?] ', # 0x8f
'Sou ', # 0x90
'Yin ', # 0x91
'Shi ', # 0x92
'Chun ', # 0x93
'Shi ', # 0x94
'Yun ', # 0x95
'Zhen ', # 0x96
'Lang ', # 0x97
'Nu ', # 0x98
'Meng ', # 0x99
'He ', # 0x9a
'Que ', # 0x9b
'Suan ', # 0x9c
'Yuan ', # 0x9d
'Li ', # 0x9e
'Ju ', # 0x9f
'Xi ', # 0xa0
'Pang ', # 0xa1
'Chu ', # 0xa2
'Xu ', # 0xa3
'Tu ', # 0xa4
'Liu ', # 0xa5
'Wo ', # 0xa6
'Zhen ', # 0xa7
'Qian ', # 0xa8
'Zu ', # 0xa9
'Po ', # 0xaa
'Cuo ', # 0xab
'Yuan ', # 0xac
'Chu ', # 0xad
'Yu ', # 0xae
'Kuai ', # 0xaf
'Pan ', # 0xb0
'Pu ', # 0xb1
'Pu ', # 0xb2
'Na ', # 0xb3
'Shuo ', # 0xb4
'Xi ', # 0xb5
'Fen ', # 0xb6
'Yun ', # 0xb7
'Zheng ', # 0xb8
'Jian ', # 0xb9
'Ji ', # 0xba
'Ruo ', # 0xbb
'Cang ', # 0xbc
'En ', # 0xbd
'Mi ', # 0xbe
'Hao ', # 0xbf
'Sun ', # 0xc0
'Zhen ', # 0xc1
'Ming ', # 0xc2
'Sou ', # 0xc3
'Xu ', # 0xc4
'Liu ', # 0xc5
'Xi ', # 0xc6
'Gu ', # 0xc7
'Lang ', # 0xc8
'Rong ', # 0xc9
'Weng ', # 0xca
'Gai ', # 0xcb
'Cuo ', # 0xcc
'Shi ', # 0xcd
'Tang ', # 0xce
'Luo ', # 0xcf
'Ru ', # 0xd0
'Suo ', # 0xd1
'Xian ', # 0xd2
'Bei ', # 0xd3
'Yao ', # 0xd4
'Gui ', # 0xd5
'Bi ', # 0xd6
'Zong ', # 0xd7
'Gun ', # 0xd8
'Za ', # 0xd9
'Xiu ', # 0xda
'Ce ', # 0xdb
'Hai ', # 0xdc
'Lan ', # 0xdd
'[?] ', # 0xde
'Ji ', # 0xdf
'Li ', # 0xe0
'Can ', # 0xe1
'Lang ', # 0xe2
'Yu ', # 0xe3
'[?] ', # 0xe4
'Ying ', # 0xe5
'Mo ', # 0xe6
'Diao ', # 0xe7
'Tiao ', # 0xe8
'Mao ', # 0xe9
'Tong ', # 0xea
'Zhu ', # 0xeb
'Peng ', # 0xec
'An ', # 0xed
'Lian ', # 0xee
'Cong ', # 0xef
'Xi ', # 0xf0
'Ping ', # 0xf1
'Qiu ', # 0xf2
'Jin ', # 0xf3
'Chun ', # 0xf4
'Jie ', # 0xf5
'Wei ', # 0xf6
'Tui ', # 0xf7
'Cao ', # 0xf8
'Yu ', # 0xf9
'Yi ', # 0xfa
'Ji ', # 0xfb
'Liao ', # 0xfc
'Bi ', # 0xfd
'Lu ', # 0xfe
'Su ', # 0xff
)
| gpl-3.0 |
scotthartbti/android_external_chromium_org | build/android/pylib/gtest/test_runner.py | 28 | 7552 | # Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import logging
import os
import re
from pylib import android_commands
from pylib import constants
from pylib import pexpect
from pylib.base import base_test_result
from pylib.base import base_test_runner
from pylib.perf import perf_control
def _TestSuiteRequiresMockTestServer(suite_name):
"""Returns True if the test suite requires mock test server."""
tests_require_net_test_server = ['unit_tests', 'net_unittests',
'content_unittests',
'content_browsertests']
return (suite_name in
tests_require_net_test_server)
def _TestSuiteRequiresHighPerfMode(suite_name):
"""Returns True if the test suite requires high performance mode."""
return 'perftests' in suite_name
class TestRunner(base_test_runner.BaseTestRunner):
  """Runs a single gtest suite on a single Android device via adb/pexpect."""
  def __init__(self, test_options, device, test_package):
    """Single test suite attached to a single device.
    Args:
      test_options: A GTestOptions object.
      device: Device to run the tests.
      test_package: An instance of TestPackage class.
    """
    super(TestRunner, self).__init__(device, test_options.tool,
                                     test_options.push_deps,
                                     test_options.cleanup_test_files)
    self.test_package = test_package
    self.test_package.tool = self.tool
    self._test_arguments = test_options.test_arguments
    timeout = test_options.timeout
    if timeout == 0:
      timeout = 60
    # On a VM (e.g. chromium buildbots), this timeout is way too small.
    if os.environ.get('BUILDBOT_SLAVENAME'):
      timeout = timeout * 2
    # Scale by the tool (e.g. valgrind) since instrumented runs are slower.
    self._timeout = timeout * self.tool.GetTimeoutScale()
    if _TestSuiteRequiresHighPerfMode(self.test_package.suite_name):
      self._perf_controller = perf_control.PerfControl(self.adb)
  #override
  def InstallTestPackage(self):
    """Installs the test package (APK or executable) on the device."""
    self.test_package.Install(self.adb)
  def GetAllTests(self):
    """Install test package and get a list of all tests."""
    self.test_package.Install(self.adb)
    return self.test_package.GetAllTests(self.adb)
  #override
  def PushDataDeps(self):
    """Pushes the isolate-declared data dependencies onto the device."""
    self.adb.WaitForSdCardReady(20)
    self.tool.CopyFiles()
    if os.path.exists(constants.ISOLATE_DEPS_DIR):
      device_dir = self.adb.GetExternalStorage()
      # TODO(frankf): linux_dumper_unittest_helper needs to be in the same dir
      # as breakpad_unittests exe. Find a better way to do this.
      if self.test_package.suite_name == 'breakpad_unittests':
        device_dir = constants.TEST_EXECUTABLE_DIR
      for p in os.listdir(constants.ISOLATE_DEPS_DIR):
        self.adb.PushIfNeeded(
            os.path.join(constants.ISOLATE_DEPS_DIR, p),
            os.path.join(device_dir, p))
  def _ParseTestOutput(self, p):
    """Process the test output.
    Args:
      p: An instance of pexpect spawn class.
    Returns:
      A TestRunResults object.
    """
    results = base_test_result.TestRunResults()
    # Test case statuses.
    re_run = re.compile('\[ RUN \] ?(.*)\r\n')
    re_fail = re.compile('\[ FAILED \] ?(.*)\r\n')
    re_ok = re.compile('\[ OK \] ?(.*?) .*\r\n')
    # Test run statuses.
    re_passed = re.compile('\[ PASSED \] ?(.*)\r\n')
    re_runner_fail = re.compile('\[ RUNNER_FAILED \] ?(.*)\r\n')
    # Signal handlers are installed before starting tests
    # to output the CRASHED marker when a crash happens.
    re_crash = re.compile('\[ CRASHED \](.*)\r\n')
    log = ''
    try:
      while True:
        # full_test_name is reset each iteration so the EOF/TIMEOUT handlers
        # below only record a crash/timeout for a test that actually started.
        full_test_name = None
        found = p.expect([re_run, re_passed, re_runner_fail],
                         timeout=self._timeout)
        if found == 1: # re_passed
          break
        elif found == 2: # re_runner_fail
          break
        else: # re_run
          full_test_name = p.match.group(1).replace('\r', '')
          found = p.expect([re_ok, re_fail, re_crash], timeout=self._timeout)
          log = p.before.replace('\r', '')
          if found == 0: # re_ok
            # Only count PASS if the OK line names the test that was started.
            if full_test_name == p.match.group(1).replace('\r', ''):
              results.AddResult(base_test_result.BaseTestResult(
                  full_test_name, base_test_result.ResultType.PASS,
                  log=log))
          elif found == 2: # re_crash
            results.AddResult(base_test_result.BaseTestResult(
                full_test_name, base_test_result.ResultType.CRASH,
                log=log))
            break
          else: # re_fail
            results.AddResult(base_test_result.BaseTestResult(
                full_test_name, base_test_result.ResultType.FAIL, log=log))
    except pexpect.EOF:
      logging.error('Test terminated - EOF')
      # We're here because either the device went offline, or the test harness
      # crashed without outputting the CRASHED marker (crbug.com/175538).
      if not self.adb.IsOnline():
        raise android_commands.errors.DeviceUnresponsiveError(
            'Device %s went offline.' % self.device)
      if full_test_name:
        results.AddResult(base_test_result.BaseTestResult(
            full_test_name, base_test_result.ResultType.CRASH,
            log=p.before.replace('\r', '')))
    except pexpect.TIMEOUT:
      logging.error('Test terminated after %d second timeout.',
                    self._timeout)
      if full_test_name:
        results.AddResult(base_test_result.BaseTestResult(
            full_test_name, base_test_result.ResultType.TIMEOUT,
            log=p.before.replace('\r', '')))
    finally:
      p.close()
    # A nonzero exit code means the harness itself failed, regardless of
    # which per-test markers were seen above.
    ret_code = self.test_package.GetGTestReturnCode(self.adb)
    if ret_code:
      logging.critical(
          'gtest exit code: %d\npexpect.before: %s\npexpect.after: %s',
          ret_code, p.before, p.after)
    return results
  #override
  def RunTest(self, test):
    """Runs the gtest filter string |test| and returns (results, retry filter)."""
    test_results = base_test_result.TestRunResults()
    if not test:
      return test_results, None
    try:
      self.test_package.ClearApplicationState(self.adb)
      self.test_package.CreateCommandLineFileOnDevice(
          self.adb, test, self._test_arguments)
      test_results = self._ParseTestOutput(
          self.test_package.SpawnTestProcess(self.adb))
    finally:
      self.CleanupSpawningServerState()
    # Calculate unknown test results.
    all_tests = set(test.split(':'))
    all_tests_ran = set([t.GetName() for t in test_results.GetAll()])
    unknown_tests = all_tests - all_tests_ran
    test_results.AddResults(
        [base_test_result.BaseTestResult(t, base_test_result.ResultType.UNKNOWN)
         for t in unknown_tests])
    # Anything that did not PASS is retried with a new filter string.
    retry = ':'.join([t.GetName() for t in test_results.GetNotPass()])
    return test_results, retry
  #override
  def SetUp(self):
    """Sets up necessary test enviroment for the test suite."""
    super(TestRunner, self).SetUp()
    if _TestSuiteRequiresMockTestServer(self.test_package.suite_name):
      self.LaunchChromeTestServerSpawner()
    if _TestSuiteRequiresHighPerfMode(self.test_package.suite_name):
      self._perf_controller.SetHighPerfMode()
    self.tool.SetupEnvironment()
  #override
  def TearDown(self):
    """Cleans up the test enviroment for the test suite."""
    if _TestSuiteRequiresHighPerfMode(self.test_package.suite_name):
      self._perf_controller.RestoreOriginalPerfMode()
    self.test_package.ClearApplicationState(self.adb)
    self.tool.CleanUpEnvironment()
    super(TestRunner, self).TearDown()
| bsd-3-clause |
rapidpro/chatpro | chatpro/profiles/models.py | 1 | 4753 | from __future__ import absolute_import, unicode_literals
from chatpro.rooms.models import Room
from dash.orgs.models import Org
from dash.utils import intersection
from dash.utils.sync import ChangeType
from django.contrib.auth.models import User
from django.db import models
from django.utils.translation import ugettext_lazy as _
from temba.types import Contact as TembaContact
from uuid import uuid4
from .tasks import push_contact_change
class AbstractParticipant(models.Model):
    """Common name fields shared by chat participants (contacts and user profiles)."""
    full_name = models.CharField(verbose_name=_("Full name"), max_length=128, null=True)
    chat_name = models.CharField(verbose_name=_("Chat name"), max_length=16, null=True,
                                 help_text=_("Shorter name used for chat messages"))
    class Meta:
        abstract = True
class Contact(AbstractParticipant):
    """
    Corresponds to a RapidPro contact who is tied to a single room
    """
    uuid = models.CharField(max_length=36, unique=True)
    org = models.ForeignKey(Org, verbose_name=_("Organization"), related_name='contacts')
    room = models.ForeignKey(Room, verbose_name=_("Room"), related_name='contacts',
                             help_text=_("Room which this contact belongs in"))
    urn = models.CharField(verbose_name=_("URN"), max_length=255)
    is_active = models.BooleanField(default=True, help_text=_("Whether this contact is active"))
    created_by = models.ForeignKey(User, null=True, related_name="contact_creations",
                                   help_text="The user which originally created this item")
    created_on = models.DateTimeField(auto_now_add=True,
                                      help_text="When this item was originally created")
    modified_by = models.ForeignKey(User, null=True, related_name="contact_modifications",
                                    help_text="The user which last modified this item")
    modified_on = models.DateTimeField(auto_now=True,
                                       help_text="When this item was last modified")
    @classmethod
    def create(cls, org, user, full_name, chat_name, urn, room, uuid=None):
        """Creates a contact locally; pushes it to RapidPro only if it is new here."""
        if org.id != room.org_id: # pragma: no cover
            raise ValueError("Room does not belong to org")
        # if we don't have a UUID, then we created this contact
        if not uuid:
            do_push = True
            uuid = unicode(uuid4())
        else:
            do_push = False
        # create contact
        contact = cls.objects.create(org=org, full_name=full_name, chat_name=chat_name, urn=urn, room=room, uuid=uuid,
                                     created_by=user, modified_by=user)
        if do_push:
            contact.push(ChangeType.created)
        return contact
    @classmethod
    def kwargs_from_temba(cls, org, temba_contact):
        """Maps a fetched RapidPro (Temba) contact to this model's constructor kwargs.

        Raises ValueError when the remote contact belongs to none of the org's rooms.
        """
        org_room_uuids = [r.uuid for r in Room.get_all(org)]
        # a contact may be in several groups; we keep the first one that is a room
        room_uuids = intersection(org_room_uuids, temba_contact.groups)
        room = Room.objects.get(org=org, uuid=room_uuids[0]) if room_uuids else None
        if not room:
            raise ValueError("No room with uuid in %s" % ", ".join(temba_contact.groups))
        return dict(org=org,
                    full_name=temba_contact.name,
                    chat_name=temba_contact.fields.get(org.get_chat_name_field(), None),
                    urn=temba_contact.urns[0],
                    room=room,
                    uuid=temba_contact.uuid)
    def as_temba(self):
        """Converts this contact to a Temba contact object for pushing to RapidPro."""
        temba_contact = TembaContact()
        temba_contact.name = self.full_name
        temba_contact.urns = [self.urn]
        temba_contact.fields = {self.org.get_chat_name_field(): self.chat_name}
        temba_contact.groups = [self.room.uuid]
        temba_contact.uuid = self.uuid
        return temba_contact
    def push(self, change_type):
        # asynchronous: queued on the celery task defined in .tasks
        push_contact_change.delay(self.id, change_type)
    def get_urn(self):
        """Returns the URN as a (scheme, path) tuple, e.g. ('tel', '+1234')."""
        return tuple(self.urn.split(':', 1))
    def release(self):
        """Soft-deletes this contact locally and propagates the deletion to RapidPro."""
        self.is_active = False
        self.save()
        self.push(ChangeType.deleted)
    def as_participant_json(self):
        # type 'C' distinguishes contacts from users ('U') in chat payloads
        return dict(id=self.id, type='C', full_name=self.full_name, chat_name=self.chat_name)
    def __unicode__(self):
        # best available display name: full name, then chat name, then URN path
        if self.full_name:
            return self.full_name
        elif self.chat_name:
            return self.chat_name
        else:
            return self.get_urn()[1]
class Profile(AbstractParticipant):
    """
    Extension for the user class
    """
    user = models.OneToOneField(User)
    change_password = models.BooleanField(default=False, help_text=_("User must change password on next login"))
    def as_participant_json(self):
        # type 'U' distinguishes users from contacts ('C') in chat payloads
        return dict(id=self.user_id, type='U', full_name=self.full_name, chat_name=self.chat_name)
kaiix/depot_tools | third_party/logilab/common/date.py | 89 | 11230 | # copyright 2003-2012 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
#
# This file is part of logilab-common.
#
# logilab-common is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation, either version 2.1 of the License, or (at your option) any
# later version.
#
# logilab-common is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with logilab-common. If not, see <http://www.gnu.org/licenses/>.
"""Date manipulation helper functions."""
from __future__ import division
__docformat__ = "restructuredtext en"
import math
import re
import sys
from locale import getlocale, LC_TIME
from datetime import date, time, datetime, timedelta
from time import strptime as time_strptime
from calendar import monthrange, timegm
from six.moves import range
try:
from mx.DateTime import RelativeDateTime, Date, DateTimeType
except ImportError:
endOfMonth = None
DateTimeType = datetime
else:
endOfMonth = RelativeDateTime(months=1, day=-1)
# NOTE: should we implement a compatibility layer between date representations
# as we have in lgc.db ?
FRENCH_FIXED_HOLIDAYS = {
'jour_an': '%s-01-01',
'fete_travail': '%s-05-01',
'armistice1945': '%s-05-08',
'fete_nat': '%s-07-14',
'assomption': '%s-08-15',
'toussaint': '%s-11-01',
'armistice1918': '%s-11-11',
'noel': '%s-12-25',
}
FRENCH_MOBILE_HOLIDAYS = {
'paques2004': '2004-04-12',
'ascension2004': '2004-05-20',
'pentecote2004': '2004-05-31',
'paques2005': '2005-03-28',
'ascension2005': '2005-05-05',
'pentecote2005': '2005-05-16',
'paques2006': '2006-04-17',
'ascension2006': '2006-05-25',
'pentecote2006': '2006-06-05',
'paques2007': '2007-04-09',
'ascension2007': '2007-05-17',
'pentecote2007': '2007-05-28',
'paques2008': '2008-03-24',
'ascension2008': '2008-05-01',
'pentecote2008': '2008-05-12',
'paques2009': '2009-04-13',
'ascension2009': '2009-05-21',
'pentecote2009': '2009-06-01',
'paques2010': '2010-04-05',
'ascension2010': '2010-05-13',
'pentecote2010': '2010-05-24',
'paques2011': '2011-04-25',
'ascension2011': '2011-06-02',
'pentecote2011': '2011-06-13',
'paques2012': '2012-04-09',
'ascension2012': '2012-05-17',
'pentecote2012': '2012-05-28',
}
# XXX this implementation cries for multimethod dispatching
def get_step(dateobj, nbdays=1):
    """Return an increment of *nbdays* days suitable for adding to *dateobj*.

    For stdlib dates this is a timedelta; mx.DateTime accepts plain integers.
    """
    # assume date is either a python datetime or a mx.DateTime object
    if isinstance(dateobj, date):
        return ONEDAY * nbdays
    return nbdays # mx.DateTime is ok with integers
def datefactory(year, month, day, sampledate):
    """Build a new date of the same concrete type as *sampledate*."""
    # assume date is either a python datetime or a mx.DateTime object
    if isinstance(sampledate, datetime):
        return datetime(year, month, day)
    if isinstance(sampledate, date):
        return date(year, month, day)
    # NOTE(review): if the mx.DateTime import failed at module load, `Date` is
    # undefined and this line raises NameError -- presumably this branch is
    # unreachable in that configuration; confirm.
    return Date(year, month, day)
def weekday(dateobj):
    """Return the day of week (0 = Monday) for either date flavour."""
    # assume date is either a python datetime or a mx.DateTime object
    if isinstance(dateobj, date):
        return dateobj.weekday()
    return dateobj.day_of_week
def str2date(datestr, sampledate):
    """Parse a 'YYYY-MM-DD' string into the same date type as *sampledate*."""
    # NOTE: datetime.strptime is not an option until we drop py2.4 compat
    year, month, day = map(int, datestr.split('-'))
    return datefactory(year, month, day, sampledate)
def days_between(start, end):
    """Return the number of days separating *start* and *end*."""
    delta = end - start
    if isinstance(start, date):
        # datetime.timedelta.days is always an integer (floored), so any
        # leftover seconds count as one more day
        return delta.days + 1 if delta.seconds else delta.days
    # mx.DateTime deltas carry fractional days: round up
    return int(math.ceil(delta.days))
def get_national_holidays(begin, end):
    """Return french national days off between *begin* and *end* (end excluded).

    Combines the hard-coded mobile holidays with the fixed holidays expanded
    for every year in the interval.
    """
    begin = datefactory(begin.year, begin.month, begin.day, begin)
    end = datefactory(end.year, end.month, end.day, end)
    holidays = [str2date(datestr, begin)
                for datestr in FRENCH_MOBILE_HOLIDAYS.values()]
    for year in range(begin.year, end.year+1):
        for datestr in FRENCH_FIXED_HOLIDAYS.values():
            # renamed from `date` which shadowed the imported datetime.date class
            holiday = str2date(datestr % year, begin)
            if holiday not in holidays:
                holidays.append(holiday)
    return [day for day in holidays if begin <= day < end]
def add_days_worked(start, days):
    """adds date but try to only take days worked into account"""
    step = get_step(start)
    # every 5 worked days span 7 calendar days
    weeks, plus = divmod(days, 5)
    end = start + ((weeks * 7) + plus) * step
    if weekday(end) >= 5: # saturday or sunday
        end += (2 * step)
    # push the end date past any national holiday that falls on a weekday
    end += len([x for x in get_national_holidays(start, end + step)
                if weekday(x) < 5]) * step
    # NOTE(review): the holiday shift may itself land on a weekend/holiday,
    # which is not re-checked beyond this single weekend bump -- confirm intent.
    if weekday(end) >= 5: # saturday or sunday
        end += (2 * step)
    return end
def nb_open_days(start, end):
    """Return the number of business days between *start* and *end*.

    Weekends and french national holidays falling on weekdays are excluded;
    the result is never negative.
    """
    assert start <= end
    step = get_step(start)
    days = days_between(start, end)
    # full weeks contribute 5 open days each; adjust the remainder for
    # weekends straddled by the partial week
    weeks, plus = divmod(days, 7)
    if weekday(start) > weekday(end):
        plus -= 2
    elif weekday(end) == 6:
        plus -= 1
    open_days = weeks * 5 + plus
    # subtract holidays that fall on weekdays inside the interval
    nb_week_holidays = len([x for x in get_national_holidays(start, end+step)
                            if weekday(x) < 5 and x < end])
    open_days -= nb_week_holidays
    if open_days < 0:
        return 0
    return open_days
def date_range(begin, end, incday=None, incmonth=None):
    """yields each date between begin and end
    :param begin: the start date
    :param end: the end date (excluded)
    :param incday: the step, in days, used to iterate over dates.
                   Default is one day.
    :param incmonth: the step, in months, used to iterate over dates;
                     mutually exclusive with incday.
    When using mx datetime, you should *NOT* use incmonth argument, use instead
    oneDay, oneHour, oneMinute, oneSecond, oneWeek or endOfMonth (to enumerate
    months) as `incday` argument
    """
    assert not (incday and incmonth)
    begin = todate(begin)
    end = todate(end)
    if incmonth:
        while begin < end:
            yield begin
            begin = next_month(begin, incmonth)
    else:
        incr = get_step(begin, incday or 1)
        while begin < end:
            yield begin
            begin += incr
# makes py datetime usable #####################################################
# handy timedelta constants
ONEDAY = timedelta(days=1)
ONEWEEK = timedelta(days=7)
# datetime.strptime only exists from python 2.5 on; emulate it otherwise
try:
    strptime = datetime.strptime
except AttributeError: # py < 2.5
    from time import strptime as time_strptime
    def strptime(value, format):
        return datetime(*time_strptime(value, format)[:6])
def strptime_time(value, format='%H:%M'):
    """Parse *value* ('HH:MM' by default) into a datetime.time object."""
    return time(*time_strptime(value, format)[3:6])
def todate(somedate):
    """return a date from a date (leaving unchanged) or a datetime"""
    if isinstance(somedate, datetime):
        # drop the time part
        return somedate.date()
    assert isinstance(somedate, (date, DateTimeType)), repr(somedate)
    return somedate
def totime(somedate):
    """return a time from a time (leaving unchanged), date or datetime"""
    # XXX mx compat
    if isinstance(somedate, time):
        return somedate
    return time(somedate.hour, somedate.minute, somedate.second)
def todatetime(somedate):
    """return a date from a date (leaving unchanged) or a datetime"""
    # take care, datetime is a subclass of date
    if isinstance(somedate, datetime):
        return somedate
    assert isinstance(somedate, (date, DateTimeType)), repr(somedate)
    # promote to a datetime at midnight
    return datetime(somedate.year, somedate.month, somedate.day)
def datetime2ticks(somedate):
    """Return *somedate* as milliseconds since the UNIX epoch (UTC)."""
    seconds = timegm(somedate.timetuple())
    return 1000 * seconds
def ticks2datetime(ticks):
    """Convert *ticks* (milliseconds since the UNIX epoch) to a local datetime.

    The normal path truncates to whole seconds (mirroring datetime2ticks).
    Timestamps out of fromtimestamp()'s range are rebuilt relative to the
    epoch using timedelta arithmetic.
    """
    # the quotient is whole seconds, the remainder leftover milliseconds
    # (the original code misnamed these `miliseconds`/`microseconds`)
    seconds, millis = divmod(ticks, 1000)
    try:
        return datetime.fromtimestamp(seconds)
    except (ValueError, OverflowError):
        epoch = datetime.fromtimestamp(0)
        nb_days, remaining_seconds = divmod(int(seconds), 86400)
        # fix: milliseconds must be scaled to microseconds (was passed raw)
        delta = timedelta(nb_days, seconds=remaining_seconds,
                          microseconds=millis * 1000)
        return epoch + delta
def days_in_month(somedate):
    """Return the number of days in *somedate*'s month."""
    _first_weekday, nb_days = monthrange(somedate.year, somedate.month)
    return nb_days
def days_in_year(somedate):
    """Return 366 if *somedate*'s year is a leap year, else 365."""
    # leap year iff February has 29 days
    nb_days_february = monthrange(somedate.year, 2)[1]
    return 366 if nb_days_february == 29 else 365
def previous_month(somedate, nbmonth=1):
    """Return the last day of the month *nbmonth* months before *somedate*."""
    remaining = nbmonth
    while remaining:
        # stepping one day back from the 1st lands on the previous month's last day
        somedate = date(somedate.year, somedate.month, 1) - timedelta(days=1)
        remaining -= 1
    return somedate
def next_month(somedate, nbmonth=1):
    """Return the first day of the month *nbmonth* months after *somedate*."""
    remaining = nbmonth
    while remaining:
        # the day after the month's last day is the next month's first day
        nb_days = monthrange(somedate.year, somedate.month)[1]
        somedate = date(somedate.year, somedate.month, nb_days) + timedelta(days=1)
        remaining -= 1
    return somedate
def first_day(somedate):
    """Return the first day of *somedate*'s month (always a plain date)."""
    return date(year=somedate.year, month=somedate.month, day=1)
def last_day(somedate):
    """Return the last day of *somedate*'s month (always a plain date)."""
    nb_days = monthrange(somedate.year, somedate.month)[1]
    return date(somedate.year, somedate.month, nb_days)
def ustrftime(somedate, fmt='%Y-%m-%d'):
    """like strftime, but returns a unicode string instead of an encoded
    string which may be problematic with localized date.
    """
    if sys.version_info >= (3, 3):
        # datetime.date.strftime() supports dates since year 1 in Python >=3.3.
        return somedate.strftime(fmt)
    else:
        try:
            if sys.version_info < (3, 0):
                # decode the byte result using the current time locale
                encoding = getlocale(LC_TIME)[1] or 'ascii'
                return unicode(somedate.strftime(str(fmt)), encoding)
            else:
                return somedate.strftime(fmt)
        except ValueError:
            if somedate.year >= 1900:
                raise
            # datetime is not happy with dates before 1900
            # we try to work around this, assuming a simple
            # format string
            fields = {'Y': somedate.year,
                      'm': somedate.month,
                      'd': somedate.day,
                      }
            if isinstance(somedate, datetime):
                fields.update({'H': somedate.hour,
                               'M': somedate.minute,
                               'S': somedate.second})
            # rewrite e.g. '%Y' into '%(Y)02d' so plain %-formatting can be used
            fmt = re.sub('%([YmdHMS])', r'%(\1)02d', fmt)
            return unicode(fmt) % fields
def utcdatetime(dt):
    """Return the naive UTC equivalent of *dt* (naive values pass through unchanged)."""
    if dt.tzinfo is None:
        return dt
    offset = dt.utcoffset()
    return dt.replace(tzinfo=None) - offset
def utctime(dt):
    """Shift an aware datetime by its utcoffset+dst and drop the tzinfo.

    Naive values pass through unchanged.
    """
    if dt.tzinfo is None:
        return dt
    shifted = dt + dt.utcoffset() + dt.dst()
    return shifted.replace(tzinfo=None)
def datetime_to_seconds(date):
    """Return the number of seconds elapsed since midnight for *date*."""
    return 3600 * date.hour + 60 * date.minute + date.second
def timedelta_to_days(delta):
    """return the time delta as a (possibly fractional) number of days"""
    # docstring fixed: it was swapped with timedelta_to_seconds'
    return delta.days + delta.seconds / (3600*24)
def timedelta_to_seconds(delta):
    """return the time delta as a number of seconds"""
    # docstring fixed: it was swapped with timedelta_to_days'
    return delta.days*(3600*24) + delta.seconds
popazerty/e2_sh4 | tests/test_timer.py | 78 | 2469 | import time
import enigma
import tests
#enigma.reset()
def test_timer(repeat = 0, timer_start = 3600, timer_length = 1000, sim_length = 86400 * 7):
	"""Record a synthetic timer and simulate the enigma2 event loop.

	NOTE: Python 2 module (print statements). Asserts exactly one timer
	survives the simulated week and, for repeating timers, that the repeated
	occurrence keeps the original time of day.
	"""
	import NavigationInstance
	at = time.time()
	t = NavigationInstance.instance.RecordTimer
	print t
	print "old mwt:", t.MaxWaitTime
	# effectively disable the wait cap so the simulation can fast-forward
	t.MaxWaitTime = 86400 * 1000
	t.processed_timers = [ ]
	t.timer_list = [ ]
	# generate a timer to test
	import xml.etree.cElementTree
	import RecordTimer
	timer = RecordTimer.createTimer(xml.etree.cElementTree.fromstring(
	"""
	<timer
	begin="%d"
	end="%d"
	serviceref="1:0:1:6DD2:44D:1:C00000:0:0:0:"
	repeated="%d"
	name="Test Event Name"
	description="Test Event Description"
	afterevent="nothing"
	eit="56422"
	disabled="0"
	justplay="0">
	</timer>""" % (at + timer_start, at + timer_start + timer_length, repeat)
	))
	t.record(timer)
	# run virtual environment
	enigma.run(sim_length)
	print "done."
	timers = t.processed_timers + t.timer_list
	print "start: %s" % (time.ctime(at + 10))
	assert len(timers) == 1
	for t in timers:
		print "begin=%d, end=%d, repeated=%d, state=%d" % (t.begin - at, t.end - at, t.repeated, t.state)
		print "begin: %s" % (time.ctime(t.begin))
		print "end: %s" % (time.ctime(t.end))
	# if repeat, check if the calculated repeated time of day matches the initial time of day
	if repeat:
		t_initial = time.localtime(at + timer_start)
		t_repeated = time.localtime(timers[0].begin)
		print t_initial
		print t_repeated
		if t_initial[3:6] != t_repeated[3:6]:
			raise tests.TestError("repeated timer time of day does not match")
#sys.modules["Tools.Notifications"] = FakeNotifications
#sys.modules["Tools.NumericalTextInput.NumericalTextInput"] = FakeNotifications
# required stuff for timer (we try to keep this minimal)
enigma.init_nav()
enigma.init_record_config()
enigma.init_parental_control()
from events import log
import calendar
import os
# we are operating in CET/CEST
os.environ['TZ'] = 'CET'
time.tzset()
# NOTE(review): the `03` literals below are Python-2-only octal syntax; this
# script cannot run under Python 3 without changing them to 3.
#log(test_timer, test_name = "test_timer_repeating", base_time = calendar.timegm((2007, 3, 1, 12, 0, 0)), repeat=0x7f, sim_length = 86400 * 7)
log(test_timer, test_name = "test_timer_repeating_dst_skip", base_time = calendar.timegm((2007, 03, 20, 0, 0, 0)), timer_start = 3600, repeat=0x7f, sim_length = 86400 * 7)
#log(test_timer, test_name = "test_timer_repeating_dst_start", base_time = calendar.timegm((2007, 03, 20, 0, 0, 0)), timer_start = 10000, repeat=0x7f, sim_length = 86400 * 7)
| gpl-2.0 |
AlandSailingRobots/sailingrobot | update_config.py | 1 | 1807 | #!/usr/bin/python3
# Updates the configuration in the json to the database
# Can run without argument for using standard file
# Or specify the file by passing it as a argument
import json
import sqlite3
import sys
# pick the config file: named boat shortcut, explicit path, or the default
if len(sys.argv) > 1:
    if str(sys.argv[1]) == 'ASPire':
        filename = 'config_ASPire.json'
    elif str(sys.argv[1]) == 'Janet':
        filename = 'config_Janet.json'
    else :
        filename = str(sys.argv[1])
else:
    filename = 'config_ASPire.json'
print(filename)
try:
    cfg = json.load(open(filename))
except FileNotFoundError:
    sys.exit('Error to open the file.\nPlease enter in argument either \'ASPire\', \'Janet\' or the filepath.')
conn = sqlite3.connect('asr.db')
db = conn.cursor()
# each top-level JSON object maps to a one-row (ID = 1) table of the same name
for table in cfg:
    data = cfg[table]
    # build the SET / column / value fragments for both INSERT and UPDATE
    # NOTE(review): SQL is assembled by string concatenation from the config
    # file's keys and values; safe only as long as the JSON is trusted local
    # input -- consider parameterized queries for the values.
    setstr = ''
    keystr = ''
    valstr = ''
    for key, value in cfg[table].items():
        if isinstance(value, str):
            value = '"' + value + '"'
        else:
            value = str(value)
        if (setstr == ''):
            setstr = key + ' = ' + value
            keystr = key
            valstr = value
        else:
            setstr = setstr + ', ' + key + ' = ' + value
            keystr = keystr + ', ' + key
            valstr = valstr + ', ' + value
    try:
        db.execute('SELECT count(*) FROM ' + str(table) + ';')
    except sqlite3.OperationalError:
        sys.exit('Error to retrieve the tables.\nCheck if the selected file \''+filename+'\' correspond to the current Database configuration')
    count = db.fetchone()[0]
    # insert the row the first time, update it afterwards
    if count == 0:
        db.execute('INSERT INTO ' + str(table) + ' (' + keystr +
                   ') VALUES (' + valstr + ');')
    else:
        db.execute('UPDATE ' + str(table) + ' SET ' +
                   setstr + ' WHERE ID = 1;')
conn.commit()
db.close()
| gpl-2.0 |
benoitsteiner/tensorflow-opencl | tensorflow/python/grappler/memory_optimizer_test.py | 9 | 9038 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the swig wrapper tf_optimizer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.core.protobuf import config_pb2
from tensorflow.core.protobuf import rewriter_config_pb2
from tensorflow.python.client import session
from tensorflow.python.framework import meta_graph
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.grappler import tf_optimizer
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import training as train
class MemoryOptimizerSwapTest(test.TestCase):
  """Tests the Grappler memory optimizer."""
  def testNoSwapping(self):
    """Make sure the graph is preserved when there is nothing to swap."""
    a = variables.Variable(10, name='a')
    b = variables.Variable(20, name='b')
    c = math_ops.add_n([a, b], name='c')
    d = math_ops.add_n([b, c], name='d')
    train_op = ops.get_collection_ref(ops.GraphKeys.TRAIN_OP)
    train_op.append(d)
    mg = meta_graph.create_meta_graph_def(graph=ops.get_default_graph())
    graph_size = len(mg.graph_def.node)
    nodes = [node.name for node in mg.graph_def.node]
    rewriter_config = rewriter_config_pb2.RewriterConfig(
        disable_model_pruning=True,
        constant_folding=rewriter_config_pb2.RewriterConfig.OFF,
        memory_optimization=rewriter_config_pb2.RewriterConfig.MANUAL)
    graph = tf_optimizer.OptimizeGraph(rewriter_config, mg)
    # no node was annotated, so the optimized graph must be unchanged
    self.assertEqual(len(graph.node), graph_size)
    self.assertItemsEqual([node.name for node in graph.node], nodes)
  def testSimpleSwap(self):
    """Check that the swap annotations are followed."""
    a = variables.Variable(10, name='a')
    b = variables.Variable(20, name='b')
    c = math_ops.add_n([a, b], name='c')
    d = math_ops.add_n([b, c], name='d')
    train_op = ops.get_collection_ref(ops.GraphKeys.TRAIN_OP)
    train_op.append(d)
    # annotate input 0 of 'd' for host swapping (MANUAL mode honors this)
    d.op.node_def.attr['_swap_to_host'].i = 0
    mg = meta_graph.create_meta_graph_def(graph=ops.get_default_graph())
    graph_size = len(mg.graph_def.node)
    rewriter_config = rewriter_config_pb2.RewriterConfig(
        disable_model_pruning=True,
        constant_folding=rewriter_config_pb2.RewriterConfig.OFF,
        memory_optimization=rewriter_config_pb2.RewriterConfig.MANUAL)
    graph = tf_optimizer.OptimizeGraph(rewriter_config, mg)
    # the rewrite adds exactly one swap_out/swap_in pair
    self.assertEqual(len(graph.node), graph_size + 2)
    self.assertTrue(
        set([node.name for node in graph.node]) > set(
            ['a', 'b', 'c', 'd', 'swap_in_d_0', 'swap_out_d_0']))
    # verify the swap pair is wired between b/read and d's first input
    for node in graph.node:
      if node.name == 'swap_in_d_0':
        self.assertEqual('swap_out_d_0', node.input[0])
        self.assertEqual('^b/read', node.input[1])
      elif node.name == 'swap_out_d_0':
        self.assertEqual('b/read', node.input[0])
      elif node.name == 'd':
        self.assertEqual('swap_in_d_0', node.input[0])
        self.assertEqual('c', node.input[1])
class MemoryOptimizerRecomputeTest(test.TestCase):
  """Tests the Python interface to recomputation rewrites.
  See core/grappler/optimizers/memory_optimizer_test.cc for functional tests.
  """
  def _GetMetaGraph(self, batch_size=14, image_dim=12, optimizer_scope_name=''):
    """A simple layered graph with conv, an intermediate op, and a ReLU."""
    graph = ops.Graph()
    with graph.as_default():
      random_seed.set_random_seed(1)
      current_activation = variable_scope.get_variable(
          name='start', shape=[batch_size, image_dim, image_dim, 5])
      conv_filter = variable_scope.get_variable(
          name='filter', shape=[5, 5, 5, 5])
      for layer_number in range(10):
        with variable_scope.variable_scope('layer_{}'.format(layer_number)):
          after_conv = nn.conv2d(current_activation, conv_filter, [1, 1, 1, 1],
                                 'SAME')
          current_activation = 2. * after_conv
          current_activation = nn.relu(current_activation)
      loss = math_ops.reduce_mean(current_activation)
      # gradient names depend on this scope; tests below rely on it
      with ops.name_scope(optimizer_scope_name):
        optimizer = train.AdamOptimizer(0.001)
        train_op = optimizer.minimize(loss)
      init_op = variables.global_variables_initializer()
      metagraph = train.export_meta_graph()
    return (metagraph, init_op.name, train_op.name, loss.name)
  def testRewritingDefaultGradientNames(self):
    """Tests that rewriting occurs with default gradient names."""
    (original_metagraph, _, _, _) = self._GetMetaGraph()
    rewritten_graph_def = tf_optimizer.OptimizeGraph(
        rewriter_config_pb2.RewriterConfig(
            disable_model_pruning=True,
            constant_folding=rewriter_config_pb2.RewriterConfig.OFF,
            arithmetic_optimization=rewriter_config_pb2.RewriterConfig.OFF,
            memory_optimization=rewriter_config_pb2.RewriterConfig.HEURISTICS),
        original_metagraph)
    self.assertGreater(
        len(rewritten_graph_def.node),
        len(original_metagraph.graph_def.node))
    self.assertEqual(
        0,
        len([node for node in original_metagraph.graph_def.node
             if 'Recomputed/' in node.name]))
    self.assertEqual(
        20, # Two per layer
        len([node for node in rewritten_graph_def.node
             if 'Recomputed/' in node.name]))
  def testRewritingNameScopedGradientNames(self):
    """Tests that rewriting occurs with non-standard gradient names."""
    (original_metagraph, _, _, _) = self._GetMetaGraph(
        optimizer_scope_name='optimizer')
    rewritten_graph_def = tf_optimizer.OptimizeGraph(
        rewriter_config_pb2.RewriterConfig(
            disable_model_pruning=True,
            constant_folding=rewriter_config_pb2.RewriterConfig.OFF,
            arithmetic_optimization=rewriter_config_pb2.RewriterConfig.OFF,
            memory_optimization=rewriter_config_pb2.RewriterConfig.HEURISTICS,
            memory_optimizer_target_node_name_prefix='optimizer/gradients/'),
        original_metagraph)
    self.assertGreater(
        len(rewritten_graph_def.node),
        len(original_metagraph.graph_def.node))
    self.assertEqual(
        0,
        len([node for node in original_metagraph.graph_def.node
             if 'Recomputed/' in node.name]))
    self.assertEqual(
        20, # Two per layer
        len([node for node in rewritten_graph_def.node
             if 'Recomputed/' in node.name]))
  def _GetMemoryOptimizerSessionConfig(self):
    """Returns a session config with heuristic memory optimization enabled."""
    rewrite_options = rewriter_config_pb2.RewriterConfig(
        disable_model_pruning=True,
        memory_optimization=rewriter_config_pb2.RewriterConfig.HEURISTICS)
    graph_options = config_pb2.GraphOptions(rewrite_options=rewrite_options)
    return config_pb2.ConfigProto(graph_options=graph_options)
  def _RunMetaGraphWithConfig(
      self, config, metagraph, init_op_name, train_op_name, loss_op_name):
    """Imports the metagraph, runs two train steps, and returns the loss."""
    graph = ops.Graph()
    with graph.as_default():
      train.import_meta_graph(metagraph)
      init_op = graph.get_operation_by_name(init_op_name)
      train_op = graph.get_operation_by_name(train_op_name)
      loss_op = graph.get_tensor_by_name(loss_op_name)
      with session.Session(config=config, graph=graph) as sess:
        sess.run(init_op)
        sess.run(train_op)
        sess.run(train_op)
        return sess.run(loss_op)
  def testRecomputationRewritingNoErrors(self):
    """Tests that graph output is not significantly different with rewriting."""
    (original_metagraph, init_op_name, train_op_name, loss_op_name
    ) = self._GetMetaGraph()
    original_loss = self._RunMetaGraphWithConfig(
        config=config_pb2.ConfigProto(),
        metagraph=original_metagraph,
        init_op_name=init_op_name,
        train_op_name=train_op_name,
        loss_op_name=loss_op_name)
    memory_optimized_loss = self._RunMetaGraphWithConfig(
        config=self._GetMemoryOptimizerSessionConfig(),
        metagraph=original_metagraph,
        init_op_name=init_op_name,
        train_op_name=train_op_name,
        loss_op_name=loss_op_name)
    # recomputation must not change numerics beyond float noise
    self.assertAllClose(original_loss, memory_optimized_loss, rtol=1e-4)
if __name__ == '__main__':
test.main()
| apache-2.0 |
pepetreshere/odoo | doc/_extensions/autojsdoc/parser/tests/support.py | 33 | 1767 | # -*- coding: utf-8 -*-
import operator
import pyjsparser
from autojsdoc.parser import jsdoc, parser
params = operator.attrgetter('name', 'type', 'doc')
def parse(s, source=None):
    """Parse JS source *s* into module docs, post-processed against the
    canned BASE_MODULES fixtures, and return the list of modules."""
    ast = pyjsparser.parse(s)
    modules = parser.ModuleMatcher(source).visit(ast)
    post(modules)
    return modules
def post(mods):
    """Resolve cross-module references in *mods*.

    The lookup table starts from the BASE_MODULES fixtures; modules parsed
    from source shadow fixtures of the same name.
    """
    known = dict(BASE_MODULES)
    known.update((module.name, module) for module in mods)
    for module in mods:
        module.post_process(known)
# Canned module fixtures available to every test parse, keyed by module name.
# post() merges these with modules parsed from source so cross-module
# references (plain values, classes, mixins, namespaces) resolve in tests.
BASE_MODULES = {
    # simple module exporting a string literal
    'other': jsdoc.ModuleDoc({
        'module': 'other',
        '_members': [
            ('<exports>', jsdoc.LiteralDoc({'name': 'value', 'value': "ok"})),
        ],
    }),
    # modules exporting bare numeric literals
    'dep2': jsdoc.ModuleDoc({
        'module': 'dep2',
        '_members': [
            ('<exports>', jsdoc.LiteralDoc({'value': 42.})),
        ],
    }),
    'dep3': jsdoc.ModuleDoc({
        'module': 'dep3',
        '_members': [
            ('<exports>', jsdoc.LiteralDoc({'value': 56.})),
        ],
    }),
    # module exporting a single class
    'Class': jsdoc.ModuleDoc({
        'module': 'Class',
        '_members': [
            ('<exports>', jsdoc.ClassDoc({
                'name': 'Class',
                'doc': "Base Class"
            })),
        ],
    }),
    # module exporting a namespace that contains a class
    'mixins': jsdoc.ModuleDoc({
        'module': 'mixins',
        '_members': [
            ('<exports>', jsdoc.NSDoc({
                'name': 'mixins',
                '_members': [
                    ('Bob', jsdoc.ClassDoc({'class': "Bob"})),
                ]
            })),
        ],
    }),
    # module exporting a mixin with one function member
    'Mixin': jsdoc.ModuleDoc({
        'module': 'Mixin',
        '_members': [
            ('<exports>', jsdoc.MixinDoc({
                '_members': [
                    ('a', jsdoc.FunctionDoc({'function': 'a'})),
                ]
            })),
        ],
    })
}
| agpl-3.0 |
junfeng-hu/blog | blog/handlers.py | 3 | 1071 | #!/usr/bin/env python
#---coding=utf8---
from HomeHandler import HomeHandler
from LoginHandler import LoginHandler
from LogoutHandler import LogoutHandler
from ArchivesHandler import ArchivesHandler
from CategoryHandler import CategoryHandler
from TagHandler import TagHandler
from PageHandler import PageHandler
from SearchHandler import SearchHandler
from AdminHome import AdminHome
from ListPost import ListPost
from EditPost import EditPost
from ListComment import ListComment
from ListTag import ListTag
from ListCategory import ListCategory
from ListHtml import ListHtml
# Tornado URL routing table: maps request-path regexes to handler classes.
handlers = [
    # public blog pages
    (r"/", HomeHandler),
    (r"/login", LoginHandler),
    (r"/logout",LogoutHandler),
    (r"/archives/([\d]*)",ArchivesHandler),
    (r"/category",CategoryHandler),
    (r"/tag",TagHandler),
    (r"/page",PageHandler),
    (r"/search",SearchHandler),
    # admin area
    (r"/admin/",AdminHome),
    (r"/list/post",ListPost),
    (r"/edit/post",EditPost),
    (r"/list/comment",ListComment),
    (r"/list/tag",ListTag),
    (r"/list/category",ListCategory),
    (r"/list/html",ListHtml),
]
| mit |
ammarkhann/FinalSeniorCode | lib/python2.7/site-packages/scipy/io/wavfile.py | 19 | 12587 | """
Module to read / write wav files using numpy arrays
Functions
---------
`read`: Return the sample rate (in samples/sec) and data from a WAV file.
`write`: Write a numpy array as a WAV file.
"""
from __future__ import division, print_function, absolute_import
import sys
import numpy
import struct
import warnings
# Public API of this module.
__all__ = [
    'WavFileWarning',
    'read',
    'write'
]
class WavFileWarning(UserWarning):
    """Warning issued for recoverable problems while parsing a WAV file."""
    pass
WAVE_FORMAT_PCM = 0x0001
WAVE_FORMAT_IEEE_FLOAT = 0x0003
WAVE_FORMAT_EXTENSIBLE = 0xfffe
KNOWN_WAVE_FORMATS = (WAVE_FORMAT_PCM, WAVE_FORMAT_IEEE_FLOAT)
# assumes file pointer is immediately
# after the 'fmt ' id
def _read_fmt_chunk(fid, is_big_endian):
"""
Returns
-------
size : int
size of format subchunk in bytes (minus 8 for "fmt " and itself)
format_tag : int
PCM, float, or compressed format
channels : int
number of channels
fs : int
sampling frequency in samples per second
bytes_per_second : int
overall byte rate for the file
block_align : int
bytes per sample, including all channels
bit_depth : int
bits per sample
"""
if is_big_endian:
fmt = '>'
else:
fmt = '<'
size = res = struct.unpack(fmt+'I', fid.read(4))[0]
bytes_read = 0
if size < 16:
raise ValueError("Binary structure of wave file is not compliant")
res = struct.unpack(fmt+'HHIIHH', fid.read(16))
bytes_read += 16
format_tag, channels, fs, bytes_per_second, block_align, bit_depth = res
if format_tag == WAVE_FORMAT_EXTENSIBLE and size >= (16+2):
ext_chunk_size = struct.unpack(fmt+'H', fid.read(2))[0]
bytes_read += 2
if ext_chunk_size >= 22:
extensible_chunk_data = fid.read(22)
bytes_read += 22
raw_guid = extensible_chunk_data[2+4:2+4+16]
# GUID template {XXXXXXXX-0000-0010-8000-00AA00389B71} (RFC-2361)
# MS GUID byte order: first three groups are native byte order,
# rest is Big Endian
if is_big_endian:
tail = b'\x00\x00\x00\x10\x80\x00\x00\xAA\x00\x38\x9B\x71'
else:
tail = b'\x00\x00\x10\x00\x80\x00\x00\xAA\x00\x38\x9B\x71'
if raw_guid.endswith(tail):
format_tag = struct.unpack(fmt+'I', raw_guid[:4])[0]
else:
raise ValueError("Binary structure of wave file is not compliant")
if format_tag not in KNOWN_WAVE_FORMATS:
raise ValueError("Unknown wave file format")
# move file pointer to next chunk
if size > (bytes_read):
fid.read(size - bytes_read)
return (size, format_tag, channels, fs, bytes_per_second, block_align,
bit_depth)
# assumes file pointer is immediately after the 'data' id
def _read_data_chunk(fid, format_tag, channels, bit_depth, is_big_endian,
                     mmap=False):
    """Read the 'data' subchunk and return the samples as a numpy array.

    Parameters
    ----------
    fid : file-like
        Positioned immediately after the 4-byte ``b'data'`` id.
    format_tag : int
        WAVE_FORMAT_PCM (integer samples) or float format.
    channels : int
        Number of interleaved channels; >1 reshapes to (frames, channels).
    bit_depth : int
        Bits per sample; determines the numpy dtype.
    is_big_endian : bool
        Byte order of the file.
    mmap : bool, optional
        If True, return a copy-on-write numpy.memmap instead of reading the
        data into memory.
    """
    if is_big_endian:
        fmt = '>I'
    else:
        fmt = '<I'

    # Size of the data subchunk in bytes
    size = struct.unpack(fmt, fid.read(4))[0]

    # Number of bytes per sample
    bytes_per_sample = bit_depth//8
    if bit_depth == 8:
        # 8-bit PCM is unsigned by spec
        dtype = 'u1'
    else:
        if is_big_endian:
            dtype = '>'
        else:
            dtype = '<'
        if format_tag == WAVE_FORMAT_PCM:
            dtype += 'i%d' % bytes_per_sample
        else:
            dtype += 'f%d' % bytes_per_sample

    if not mmap:
        # numpy.fromstring is deprecated for binary input; frombuffer plus an
        # explicit copy gives the same independent, writable array.
        data = numpy.frombuffer(fid.read(size), dtype=dtype).copy()
    else:
        start = fid.tell()
        data = numpy.memmap(fid, dtype=dtype, mode='c', offset=start,
                            shape=(size//bytes_per_sample,))
        fid.seek(start + size)

    if channels > 1:
        data = data.reshape(-1, channels)
    return data
def _skip_unknown_chunk(fid, is_big_endian):
if is_big_endian:
fmt = '>I'
else:
fmt = '<I'
data = fid.read(4)
# call unpack() and seek() only if we have really read data from file
# otherwise empty read at the end of the file would trigger
# unnecessary exception at unpack() call
# in case data equals somehow to 0, there is no need for seek() anyway
if data:
size = struct.unpack(fmt, data)[0]
fid.seek(size, 1)
def _read_riff_chunk(fid):
str1 = fid.read(4) # File signature
if str1 == b'RIFF':
is_big_endian = False
fmt = '<I'
elif str1 == b'RIFX':
is_big_endian = True
fmt = '>I'
else:
# There are also .wav files with "FFIR" or "XFIR" signatures?
raise ValueError("File format {}... not "
"understood.".format(repr(str1)))
# Size of entire file
file_size = struct.unpack(fmt, fid.read(4))[0] + 8
str2 = fid.read(4)
if str2 != b'WAVE':
raise ValueError("Not a WAV file.")
return file_size, is_big_endian
def read(filename, mmap=False):
    """
    Open a WAV file

    Return the sample rate (in samples/sec) and data from a WAV file.

    Parameters
    ----------
    filename : string or open file handle
        Input wav file.
    mmap : bool, optional
        Whether to read data as memory-mapped.
        Only to be used on real files (Default: False).

        .. versionadded:: 0.12.0

    Returns
    -------
    rate : int
        Sample rate of wav file.
    data : numpy array
        Data read from wav file. Data-type is determined from the file;
        see Notes.

    Notes
    -----
    This function cannot read wav files with 24-bit data.

    Common data types: [1]_

    ===================== =========== =========== =============
         WAV format            Min          Max       NumPy dtype
    ===================== =========== =========== =============
    32-bit floating-point -1.0         +1.0        float32
    32-bit PCM            -2147483648  +2147483647 int32
    16-bit PCM            -32768       +32767      int16
    8-bit PCM             0            255         uint8
    ===================== =========== =========== =============

    Note that 8-bit PCM is unsigned.

    References
    ----------
    .. [1] IBM Corporation and Microsoft Corporation, "Multimedia Programming
       Interface and Data Specifications 1.0", section "Data Format of the
       Samples", August 1991
       http://www-mmsp.ece.mcgill.ca/documents/audioformats/wave/Docs/riffmci.pdf

    """
    if hasattr(filename, 'read'):
        fid = filename
        # memory-mapping requires a real file path, not an open stream
        mmap = False
    else:
        fid = open(filename, 'rb')

    try:
        file_size, is_big_endian = _read_riff_chunk(fid)
        fmt_chunk_received = False
        # Defaults only used until the 'fmt ' chunk is parsed.
        channels = 1
        bit_depth = 8
        format_tag = WAVE_FORMAT_PCM
        while fid.tell() < file_size:
            # read the next chunk
            chunk_id = fid.read(4)
            if not chunk_id:
                raise ValueError("Unexpected end of file.")
            elif len(chunk_id) < 4:
                raise ValueError("Incomplete wav chunk.")

            if chunk_id == b'fmt ':
                fmt_chunk_received = True
                fmt_chunk = _read_fmt_chunk(fid, is_big_endian)
                format_tag, channels, fs = fmt_chunk[1:4]
                bit_depth = fmt_chunk[6]
                if bit_depth not in (8, 16, 32, 64, 96, 128):
                    raise ValueError("Unsupported bit depth: the wav file "
                                     "has {}-bit data.".format(bit_depth))
            elif chunk_id == b'fact':
                _skip_unknown_chunk(fid, is_big_endian)
            elif chunk_id == b'data':
                if not fmt_chunk_received:
                    raise ValueError("No fmt chunk before data")
                data = _read_data_chunk(fid, format_tag, channels, bit_depth,
                                        is_big_endian, mmap)
            elif chunk_id == b'LIST':
                # Someday this could be handled properly but for now skip it
                _skip_unknown_chunk(fid, is_big_endian)
            elif chunk_id in (b'JUNK', b'Fake'):
                # Skip alignment chunks without warning
                _skip_unknown_chunk(fid, is_big_endian)
            else:
                warnings.warn("Chunk (non-data) not understood, skipping it.",
                              WavFileWarning)
                _skip_unknown_chunk(fid, is_big_endian)
    finally:
        # Close files we opened; rewind streams the caller handed us.
        if not hasattr(filename, 'read'):
            fid.close()
        else:
            fid.seek(0)

    # NOTE(review): if the file has no 'fmt ' or no 'data' chunk, `fs`/`data`
    # are unbound here and this raises NameError rather than ValueError.
    return fs, data
def write(filename, rate, data):
    """
    Write a numpy array as a WAV file.

    Parameters
    ----------
    filename : string or open file handle
        Output wav file.
    rate : int
        The sample rate (in samples/sec).
    data : ndarray
        A 1-D or 2-D numpy array of either integer or float data-type.

    Notes
    -----
    * Writes a simple uncompressed WAV file.
    * To write multiple-channels, use a 2-D array of shape
      (Nsamples, Nchannels).
    * The bits-per-sample and PCM/float will be determined by the data-type.

    Common data types: [1]_

    ===================== =========== =========== =============
         WAV format            Min          Max       NumPy dtype
    ===================== =========== =========== =============
    32-bit floating-point -1.0         +1.0        float32
    32-bit PCM            -2147483648  +2147483647 int32
    16-bit PCM            -32768       +32767      int16
    8-bit PCM             0            255         uint8
    ===================== =========== =========== =============

    Note that 8-bit PCM is unsigned.

    References
    ----------
    .. [1] IBM Corporation and Microsoft Corporation, "Multimedia Programming
       Interface and Data Specifications 1.0", section "Data Format of the
       Samples", August 1991
       http://www-mmsp.ece.mcgill.ca/documents/audioformats/wave/Docs/riffmci.pdf

    """
    if hasattr(filename, 'write'):
        fid = filename
    else:
        fid = open(filename, 'wb')

    fs = rate

    try:
        dkind = data.dtype.kind
        # Only signed ints, floats, and 1-byte unsigned (8-bit PCM) allowed.
        if not (dkind == 'i' or dkind == 'f' or (dkind == 'u' and
                                                 data.dtype.itemsize == 1)):
            raise ValueError("Unsupported data type '%s'" % data.dtype)

        header_data = b''

        header_data += b'RIFF'
        # Placeholder for the overall RIFF size; patched in at the end.
        header_data += b'\x00\x00\x00\x00'
        header_data += b'WAVE'

        # fmt chunk
        header_data += b'fmt '
        if dkind == 'f':
            format_tag = WAVE_FORMAT_IEEE_FLOAT
        else:
            format_tag = WAVE_FORMAT_PCM
        if data.ndim == 1:
            channels = 1
        else:
            channels = data.shape[1]
        bit_depth = data.dtype.itemsize * 8
        bytes_per_second = fs*(bit_depth // 8)*channels
        block_align = channels * (bit_depth // 8)

        fmt_chunk_data = struct.pack('<HHIIHH', format_tag, channels, fs,
                                     bytes_per_second, block_align, bit_depth)
        if not (dkind == 'i' or dkind == 'u'):
            # add cbSize field for non-PCM files
            fmt_chunk_data += b'\x00\x00'

        header_data += struct.pack('<I', len(fmt_chunk_data))
        header_data += fmt_chunk_data

        # fact chunk (non-PCM files)
        if not (dkind == 'i' or dkind == 'u'):
            header_data += b'fact'
            header_data += struct.pack('<II', 4, data.shape[0])

        # check data size (needs to be immediately before the data chunk)
        if ((len(header_data)-4-4) + (4+4+data.nbytes)) > 0xFFFFFFFF:
            raise ValueError("Data exceeds wave file size limit")

        fid.write(header_data)

        # data chunk
        fid.write(b'data')
        fid.write(struct.pack('<I', data.nbytes))
        # WAV payload is little-endian; byteswap big-endian arrays first.
        if data.dtype.byteorder == '>' or (data.dtype.byteorder == '=' and
                                           sys.byteorder == 'big'):
            data = data.byteswap()
        _array_tofile(fid, data)

        # Determine file size and place it in correct
        # position at start of the file.
        size = fid.tell()
        fid.seek(4)
        fid.write(struct.pack('<I', size-8))

    finally:
        # Close files we opened; rewind streams the caller handed us.
        if not hasattr(filename, 'write'):
            fid.close()
        else:
            fid.seek(0)
# Version-specific raw serialization of a numpy array to an open binary file.
if sys.version_info[0] >= 3:
    def _array_tofile(fid, data):
        # ravel gives a c-contiguous buffer
        fid.write(data.ravel().view('b').data)
else:
    def _array_tofile(fid, data):
        # Python 2: tostring() returns the array's raw bytes.
        fid.write(data.tostring())
| mit |
gnemoug/scrapy | scrapy/tests/test_link.py | 27 | 1978 | import unittest
import warnings
from scrapy.link import Link
class LinkTest(unittest.TestCase):
    """Unit tests for scrapy.link.Link equality, hashing, repr and URL type."""

    def _assert_same_links(self, link1, link2):
        # Equal links must also hash equal (dict/set usability).
        self.assertEqual(link1, link2)
        self.assertEqual(hash(link1), hash(link2))

    def _assert_different_links(self, link1, link2):
        self.assertNotEqual(link1, link2)
        self.assertNotEqual(hash(link1), hash(link2))

    def test_eq_and_hash(self):
        """Links differing in url, text, fragment or nofollow compare unequal."""
        l1 = Link("http://www.example.com")
        l2 = Link("http://www.example.com/other")
        l3 = Link("http://www.example.com")
        self._assert_same_links(l1, l1)
        self._assert_different_links(l1, l2)
        self._assert_same_links(l1, l3)
        l4 = Link("http://www.example.com", text="test")
        l5 = Link("http://www.example.com", text="test2")
        l6 = Link("http://www.example.com", text="test")
        self._assert_same_links(l4, l4)
        self._assert_different_links(l4, l5)
        self._assert_same_links(l4, l6)
        l7 = Link("http://www.example.com", text="test", fragment='something', nofollow=False)
        l8 = Link("http://www.example.com", text="test", fragment='something', nofollow=False)
        l9 = Link("http://www.example.com", text="test", fragment='something', nofollow=True)
        l10 = Link("http://www.example.com", text="test", fragment='other', nofollow=False)
        self._assert_same_links(l7, l8)
        self._assert_different_links(l7, l9)
        self._assert_different_links(l7, l10)

    def test_repr(self):
        """repr() must round-trip through eval() to an equal Link."""
        l1 = Link("http://www.example.com", text="test", fragment='something', nofollow=True)
        l2 = eval(repr(l1))
        self._assert_same_links(l1, l2)

    def test_unicode_url(self):
        """A unicode URL is UTF-8 encoded to a byte string, with a warning.

        NOTE(review): Python 2 era semantics — `str` here is the byte string
        type and the literal is the UTF-8 encoding of u'\\xa3'.
        """
        with warnings.catch_warnings(record=True) as w:
            l = Link(u"http://www.example.com/\xa3")
            assert isinstance(l.url, str)
            assert l.url == 'http://www.example.com/\xc2\xa3'
            assert len(w) == 1, "warning not issued"
| bsd-3-clause |
xu6148152/Binea_Python_Project | PythonCookbook/text_str/strs_and_text.py | 1 | 7706 | # !python3
import re
def test_re_split():
    """Demonstrate re.split with character classes and capture groups."""
    line = 'asdf fjdk; dfjkaf, fdjksf, jdksf, foo'
    print(re.split(r'[;,\s]\s*', line))
    # A capturing group makes split() also return the matched delimiters.
    fields = re.split(r'(;|,|\s)\s*', line)
    print(fields)
    values = fields[::2]
    print(values)
    delimiter = fields[1::2] + ['']
    print(delimiter)
    # (?:...) is non-capturing, so only the fields come back.
    print(re.split(r'(?:,|;|\s)\s*', line))
def test_start_with():
    """Demonstrate endswith() with a tuple of suffixes and any() over names.

    Prints the .c/.h filenames, then whether any filename ends in .py.
    """
    filenames = ['Makefile', 'foo.c', 'bar.py', 'spam.c', 'spam.h']
    print([name for name in filenames if name.endswith(('.c', '.h'))])
    # Fixed: the original wrote `print(any(name.endswith('.py')) for name in
    # filenames)`, a generator expression wrapped around print's result, so it
    # printed a generator object instead of the boolean.
    print(any(name.endswith('.py') for name in filenames))
def test_fnmatch():
    """Demonstrate shell-style matching: fnmatch vs the case-exact variant."""
    from fnmatch import fnmatch, fnmatchcase
    for matcher, pattern in ((fnmatch, '*.txt'), (fnmatchcase, '*.TXT')):
        print(matcher('foo.txt', pattern))
def test_str_match():
    """Demonstrate regex match groups and findall on date-like text."""
    datepat = re.compile(r'(\d+)/(\d+)/(\d+)')
    text1 = '11/27/2012'
    text2 = 'Nov 27, 2012'
    m = datepat.match(text1)
    print(m.group(0))   # whole match
    print(m.group(1))   # month
    print(m.group(2))   # day
    print(m.group(3))   # year
    print(m.groups())
    # findall returns one (month, day, year) tuple per occurrence.
    text = 'Today is 11/27/2012. PyCon starts 3/13/2013'
    print(datepat.findall(text))
def test_str_replace():
    """Demonstrate re.sub with a backreference template and with a callback."""
    text = 'Today is 11/27/2012. PyCon starts 3/13/2013'
    pattern = re.compile(r'(\d+)/(\d+)/(\d+)')
    print(pattern.sub(r'\3-\1-\2', text))
    print(pattern.sub(change_date, text))


def change_date(m):
    """Rewrite an m/d/y match object as 'day MonthAbbr year'."""
    from calendar import month_abbr
    return '{} {} {}'.format(m.group(2), month_abbr[int(m.group(1))],
                             m.group(3))
def test_unicode():
    """Demonstrate Unicode normalization of equivalent spellings of ñ."""
    s1 = 'Spicy Jalape\u00f1o'   # precomposed ñ
    s2 = 'Spicy Jalapen\u0303o'  # n + combining tilde
    s3 = 'Spicy Jalape\xf1o'
    import unicodedata
    # NFC yields the fully composed (single-codepoint) form
    t1 = unicodedata.normalize('NFC', s1)
    t2 = unicodedata.normalize('NFC', s2)
    # NFD yields the decomposed (multi-codepoint) form
    t3 = unicodedata.normalize('NFD', s3)
    print(t1)
    print(t2)
    print(t3)
def test_strip():
    """Demonstrate str.strip: whitespace by default, or an explicit char set."""
    padded = ' Hello world \n'
    print(padded.strip())
    decorated = '--------------hello========'
    print(decorated.strip('-='))
def test_translate():
    """Map every Unicode decimal digit to its ASCII digit via str.translate."""
    import unicodedata
    import sys
    # Translation table covering every 'Nd' (decimal digit) codepoint; note
    # this scans the whole Unicode range, so it is slow to build.
    digitmap = {c: ord('0') + unicodedata.digit(chr(c))
                for c in range(sys.maxunicode)
                if unicodedata.category(chr(c)) == 'Nd'}
    x = '\u0661\u0662\u0663'  # Arabic-Indic digits one, two, three
    print(x.translate(digitmap))
def test_just():
    """Demonstrate text alignment: ljust/rjust/center and format() specs."""
    text = 'Hello World'
    print(text.ljust(20, '='))
    print(text.rjust(20))
    print(text.center(20, '*'))
    # format() spec equivalents: > right-align, ^ center, with fill chars.
    print(format(text, '=>20'))
    print(format(text, '*^20'))
    print('{:>10s} {:>10s}'.format('Hello', 'World'))
def test_join():
    """Demonstrate the ways to combine strings: join, +, literal and print."""
    words = ['Is', 'Chicago', 'Not', 'Chicago?']
    for separator in (' ', ',', ''):
        print(separator.join(words))
    first = 'Is Chicago'
    second = 'Not Chicago'
    third = 'None'
    print(first + ' ' + second)
    # Adjacent string literals are concatenated at compile time.
    print('Hello' 'World')
    date = ['ACME', 50, 91.1]
    # join() needs strings, so convert each element first.
    print(','.join(str(item) for item in date))
    print(first, second, third, sep=':')
def test_format():
    """Demonstrate format_map with a safe-substitution dict and frame locals."""
    s = '{name} has {n} message'
    print(s.format(name='Guido', n=37))
    name = 'Guido'
    # n = 37
    # print(s.format_map(vars()))
    # SafeSub leaves unknown placeholders (here {n}) intact instead of raising.
    print(s.format_map(SafeSub(vars())))
    print(sub('Hello {name}'))
    print(sub('You have {n} messages.'))


class SafeSub(dict):
    """dict that leaves missing format placeholders as literal '{key}'."""
    def __missing__(self, key):
        return '{' + key + '}'


def sub(text):
    """Format *text* using the CALLER's local variables (frame hack)."""
    import sys
    # _getframe(1) is the caller's frame; its f_locals feed the substitution.
    return text.format_map(SafeSub(sys._getframe(1).f_locals))
def test_textwrap():
    """Demonstrate textwrap.fill with initial vs subsequent line indents."""
    s = "Look into my eyes, look into my eyes, the eyes, the eyes, " \
        "the eyes, not around the eyes, don't look around the eyes," \
        "look into my eyes, you're under"
    import textwrap
    print(textwrap.fill(s, 40, initial_indent='    '))
    print(textwrap.fill(s, 40, subsequent_indent='    '))
    # To wrap at the terminal width, use: os.get_terminal_size().columns
def generate_tokens(pat, text):
    """Yield Token(type, value) namedtuples for successive matches of *pat*.

    *pat* must use named groups; each token's type is the name of the group
    that matched (match.lastgroup).
    """
    from collections import namedtuple
    Token = namedtuple('Token', ['type', 'value'])
    scan = pat.scanner(text)
    match = scan.match()
    while match is not None:
        yield Token(match.lastgroup, match.group())
        match = scan.match()
def test_bin_text():
    """Demonstrate bytes literals: indexing yields ints, decode yields str."""
    a = b'Hello World'
    print(a)
    print(a[0])  # 72 — indexing a bytes object returns an integer
    print(a.decode('ascii'))
def test_gz_file():
    """Read a gzip-compressed text file ('somefile.gz' must exist)."""
    import gzip
    with gzip.open('somefile.gz', 'rt') as f:
        text = f.read()
    print(text)
def test_bz2_file():
    """Read a bz2-compressed text file ('somefile.bz2' must exist).

    Fixed: this was a copy-paste duplicate named ``test_gz_file``, which
    silently shadowed the gzip version defined just above.
    """
    import bz2
    with bz2.open('somefile.bz2', 'rt') as f:
        text = f.read()
    print(text)
def test_partial_file():
    """Iterate a binary file in fixed-size records via iter() + sentinel."""
    from functools import partial
    RECORD_SIZE = 32
    with open('somefile.data', 'rb') as f:
        # Calls f.read(32) repeatedly until it returns the sentinel b''.
        # NOTE(review): `records` is built but never consumed here.
        records = iter(partial(f.read, RECORD_SIZE), b'')
def read_into_buffer(filename):
    """Read the whole file at *filename* into a pre-sized mutable bytearray."""
    import os.path
    size = os.path.getsize(filename)
    buf = bytearray(size)
    with open(filename, 'rb') as fh:
        fh.readinto(buf)
    return buf
def test_buffer():
    """Demonstrate zero-copy mutation of a bytearray through a memoryview."""
    with open('sample.bin', 'wb') as f:
        f.write(b'Hello World')
    buf = read_into_buffer('sample.bin')
    print(buf)
    print(buf[0:5])
    m1 = memoryview(buf)
    m2 = m1[-5:]  # view of the last 5 bytes, no copy
    print(m2)
    # Writing through the view mutates the underlying bytearray in place.
    m2[:] = b'WORLD'
    print(buf)
import os
import mmap
def memory_map(filename, access=mmap.ACCESS_WRITE):
    """Memory-map the whole of an existing file and return the mmap object.

    With the default ACCESS_WRITE, modifications are flushed back to the
    file; pass mmap.ACCESS_COPY for private copy-on-write changes.
    """
    size = os.path.getsize(filename)
    fd = os.open(filename, os.O_RDWR)
    return mmap.mmap(fd, size, access=access)
def test_mmap():
    """Create a sparse 1MB file, mutate it through mmap, and read it back."""
    size = 1000000
    with open('data', 'wb') as f:
        # Seek past the end and write one byte to set the file length.
        f.seek(size - 1)
        f.write(b'\x00')
    m = memory_map('data')
    print(len(m))
    print(m[0:10])
    print(m[0])
    # Slice assignment writes straight through to the underlying file.
    m[0:11] = b'Hello World'
    m.close()
    with open('data', 'rb') as f:
        print(f.read(11))
def test_filepath():
    """Demonstrate os.path helpers on the current directory's absolute path."""
    import os
    path = os.path.abspath('.')
    print(os.path.basename(path))
    print(os.path.dirname(path))
    print(os.path.join('tmp', 'data', os.path.basename(path)))
    print(os.path.expanduser(path))
    print(os.path.split(path))
def test_file_exist():
    """Demonstrate existence/type/size checks from os.path."""
    print(os.path.exists('.'))
    print(os.path.isfile('xt.bin'))
    print(os.path.isdir(os.path.dirname(os.path.abspath('.'))))
    print(os.path.islink('.'))
    print(os.path.getsize('.'))
def test_file_list():
    """List directory contents three ways and print per-file metadata."""
    print(os.listdir('.'))
    from fnmatch import fnmatch
    pyfiles = [name for name in os.listdir('.') if fnmatch(name, '*.py')]
    print(pyfiles)
    import glob
    print(glob.glob('./*.py'))
    import time
    name_sz_date = [(name, os.path.getsize(name), os.path.getmtime(name)) for name in pyfiles]
    for name, size, mtime in name_sz_date:
        try:
            print(name, size, time.ctime(mtime))
        except UnicodeEncodeError:
            # Undecodable (surrogate-escaped) filenames can't be printed
            # directly; fall back to a repr-based form.
            print(bad_filename(name))
def test_filename_encode():
    """Print the codec Python uses to encode/decode filenames on this OS."""
    import sys
    print(sys.getfilesystemencoding())
def bad_filename(filename):
    """Return a printable form of *filename*: its repr() minus the quotes."""
    quoted = repr(filename)
    return quoted[1:-1]
def test_write_bin_file():
    """Write raw bytes directly to stdout's underlying binary buffer."""
    import sys
    sys.stdout.buffer.write(b'Hello\n')
def test_tempfile():
    """Demonstrate anonymous and named temporary files (auto-deleted)."""
    from tempfile import TemporaryFile
    from tempfile import NamedTemporaryFile
    with TemporaryFile('w+t') as f:
        f.write('Hello World')
        f.write('Testing\n')
        f.seek(0)  # rewind so the content just written can be read back
        data = f.read()
    with NamedTemporaryFile('w+t') as f:
        # Unlike TemporaryFile, this one has a visible filesystem name.
        print('filename is:', f.name)
def test_serial():
    """Demonstrate pickle round-trips, including multiple objects per file."""
    import pickle
    data = 'Hello, World'
    f = open('somefile', 'wb')
    pickle.dump(data, f)
    f = open('somefile', 'rb')
    data = pickle.load(f)
    print(data)
    # Several dumps to one file are read back with successive load() calls.
    f = open('somedata', 'wb')
    pickle.dump([1, 2, 3, 4], f)
    pickle.dump('hello', f)
    pickle.dump({'Apple', 'Pear', 'Banana'}, f)
    f.close()
    f = open('somedata', 'rb')
    print(pickle.load(f))
    print(pickle.load(f))
    print(pickle.load(f))
def test_countdown():
    """Pickle a project Countdown object to disk and load it back."""
    from class_object import countdown
    c = countdown.Countdown(30)
    print(c)
    f = open('cstate.p', 'wb')
    import pickle
    pickle.dump(c, f)
    f.close()
    f = open('cstate.p', 'rb')
    print(pickle.load(f))
# Script entry point.
# Fixed: a dataset-export artifact ("| mit |") was fused onto the call line,
# making it a syntax error; it has been removed.
if __name__ == '__main__':
    test_countdown()
admire93/youtube-dl | youtube_dl/extractor/facebook.py | 74 | 7339 | from __future__ import unicode_literals
import json
import re
import socket
from .common import InfoExtractor
from ..compat import (
compat_http_client,
compat_str,
compat_urllib_error,
compat_urllib_parse_unquote,
compat_urllib_request,
)
from ..utils import (
ExtractorError,
int_or_none,
limit_length,
urlencode_postdata,
get_element_by_id,
clean_html,
)
class FacebookIE(InfoExtractor):
    """Extractor for Facebook videos, with optional authenticated access.

    Fixes two misspelled user-facing warning messages ("exceded" ->
    "exceeded", "brower" -> "browser"); extraction logic is unchanged.
    """
    _VALID_URL = r'''(?x)
                https?://(?:\w+\.)?facebook\.com/
                (?:[^#]*?\#!/)?
                (?:
                    (?:video/video\.php|photo\.php|video\.php|video/embed)\?(?:.*?)
                    (?:v|video_id)=|
                    [^/]+/videos/(?:[^/]+/)?
                )
                (?P<id>[0-9]+)
                (?:.*)'''
    _LOGIN_URL = 'https://www.facebook.com/login.php?next=http%3A%2F%2Ffacebook.com%2Fhome.php&login_attempt=1'
    _CHECKPOINT_URL = 'https://www.facebook.com/checkpoint/?next=http%3A%2F%2Ffacebook.com%2Fhome.php&_fb_noscript=1'
    _NETRC_MACHINE = 'facebook'
    IE_NAME = 'facebook'
    _TESTS = [{
        'url': 'https://www.facebook.com/video.php?v=637842556329505&fref=nf',
        'md5': '6a40d33c0eccbb1af76cf0485a052659',
        'info_dict': {
            'id': '637842556329505',
            'ext': 'mp4',
            'title': 're:Did you know Kei Nishikori is the first Asian man to ever reach a Grand Slam',
            'uploader': 'Tennis on Facebook',
        }
    }, {
        'note': 'Video without discernible title',
        'url': 'https://www.facebook.com/video.php?v=274175099429670',
        'info_dict': {
            'id': '274175099429670',
            'ext': 'mp4',
            'title': 'Facebook video #274175099429670',
            'uploader': 'Asif Nawab Butt',
        },
        'expected_warnings': [
            'title'
        ]
    }, {
        'url': 'https://www.facebook.com/video.php?v=10204634152394104',
        'only_matching': True,
    }, {
        'url': 'https://www.facebook.com/amogood/videos/1618742068337349/?fref=nf',
        'only_matching': True,
    }, {
        'url': 'https://www.facebook.com/ChristyClarkForBC/videos/vb.22819070941/10153870694020942/?type=2&theater',
        'only_matching': True,
    }]

    def _login(self):
        """Log in with .netrc / CLI credentials, handling the checkpoint page.

        A no-op when no credentials are configured; failures only warn, so
        extraction of public videos still proceeds.
        """
        (useremail, password) = self._get_login_info()
        if useremail is None:
            return

        login_page_req = compat_urllib_request.Request(self._LOGIN_URL)
        login_page_req.add_header('Cookie', 'locale=en_US')
        login_page = self._download_webpage(login_page_req, None,
                                            note='Downloading login page',
                                            errnote='Unable to download login page')
        # Hidden anti-CSRF tokens required by the login form.
        lsd = self._search_regex(
            r'<input type="hidden" name="lsd" value="([^"]*)"',
            login_page, 'lsd')
        lgnrnd = self._search_regex(r'name="lgnrnd" value="([^"]*?)"', login_page, 'lgnrnd')

        login_form = {
            'email': useremail,
            'pass': password,
            'lsd': lsd,
            'lgnrnd': lgnrnd,
            'next': 'http://facebook.com/home.php',
            'default_persistent': '0',
            'legacy_return': '1',
            'timezone': '-60',
            'trynum': '1',
        }
        request = compat_urllib_request.Request(self._LOGIN_URL, urlencode_postdata(login_form))
        request.add_header('Content-Type', 'application/x-www-form-urlencoded')
        try:
            login_results = self._download_webpage(request, None,
                                                   note='Logging in', errnote='unable to fetch login page')
            # The login form re-appearing means the credentials were rejected.
            if re.search(r'<form(.*)name="login"(.*)</form>', login_results) is not None:
                self._downloader.report_warning('unable to log in: bad username/password, or exceeded login rate limit (~3/min). Check credentials or wait.')
                return

            check_form = {
                'fb_dtsg': self._search_regex(r'name="fb_dtsg" value="(.+?)"', login_results, 'fb_dtsg'),
                'h': self._search_regex(
                    r'name="h"\s+(?:\w+="[^"]+"\s+)*?value="([^"]+)"', login_results, 'h'),
                'name_action_selected': 'dont_save',
            }
            check_req = compat_urllib_request.Request(self._CHECKPOINT_URL, urlencode_postdata(check_form))
            check_req.add_header('Content-Type', 'application/x-www-form-urlencoded')
            check_response = self._download_webpage(check_req, None,
                                                    note='Confirming login')
            if re.search(r'id="checkpointSubmitButton"', check_response) is not None:
                self._downloader.report_warning('Unable to confirm login, you have to login in your browser and authorize the login.')
        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
            self._downloader.report_warning('unable to log in: %s' % compat_str(err))
            return

    def _real_initialize(self):
        self._login()

    def _real_extract(self, url):
        """Extract video formats/metadata from the canonical video.php page."""
        video_id = self._match_id(url)
        url = 'https://www.facebook.com/video/video.php?v=%s' % video_id
        webpage = self._download_webpage(url, video_id)

        # The player parameters live in an inline script between these markers.
        BEFORE = '{swf.addParam(param[0], param[1]);});\n'
        AFTER = '.forEach(function(variable) {swf.addVariable(variable[0], variable[1]);});'
        m = re.search(re.escape(BEFORE) + '(.*?)' + re.escape(AFTER), webpage)
        if not m:
            m_msg = re.search(r'class="[^"]*uiInterstitialContent[^"]*"><div>(.*?)</div>', webpage)
            if m_msg is not None:
                raise ExtractorError(
                    'The video is not available, Facebook said: "%s"' % m_msg.group(1),
                    expected=True)
            else:
                raise ExtractorError('Cannot parse data')
        data = dict(json.loads(m.group(1)))
        params_raw = compat_urllib_parse_unquote(data['params'])
        params = json.loads(params_raw)
        video_data = params['video_data'][0]

        formats = []
        for quality in ['sd', 'hd']:
            src = video_data.get('%s_src' % quality)
            if src is not None:
                formats.append({
                    'format_id': quality,
                    'url': src,
                })
        if not formats:
            raise ExtractorError('Cannot find video formats')

        # Title fallbacks: page header, then photo caption, then a synthetic
        # "Facebook video #<id>" placeholder.
        video_title = self._html_search_regex(
            r'<h2\s+[^>]*class="uiHeaderTitle"[^>]*>([^<]*)</h2>', webpage, 'title',
            default=None)
        if not video_title:
            video_title = self._html_search_regex(
                r'(?s)<span class="fbPhotosPhotoCaption".*?id="fbPhotoPageCaption"><span class="hasCaption">(.*?)</span>',
                webpage, 'alternative title', fatal=False)
            video_title = limit_length(video_title, 80)
        if not video_title:
            video_title = 'Facebook video #%s' % video_id
        uploader = clean_html(get_element_by_id('fbPhotoPageAuthorName', webpage))

        return {
            'id': video_id,
            'title': video_title,
            'formats': formats,
            'duration': int_or_none(video_data.get('video_duration')),
            'thumbnail': video_data.get('thumbnail_src'),
            'uploader': uploader,
        }
| unlicense |
trishnaguha/pkgdb2 | tests/test_package_listing.py | 4 | 11765 | # -*- coding: utf-8 -*-
#
# Copyright © 2013-2014 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions
# of the GNU General Public License v.2, or (at your option) any later
# version. This program is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY expressed or implied, including the
# implied warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR
# PURPOSE. See the GNU General Public License for more details. You
# should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# Any Red Hat trademarks that are incorporated in the source
# code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission
# of Red Hat, Inc.
#
'''
pkgdb tests for the PackageListing object.
'''
__requires__ = ['SQLAlchemy >= 0.8']
import pkg_resources
import unittest
import sys
import os
sys.path.insert(0, os.path.join(os.path.dirname(
os.path.abspath(__file__)), '..'))
from pkgdb2.lib import model
from tests import (Modeltests, create_package_listing, create_package_acl,
create_package_critpath)
class PackageListingtests(Modeltests):
""" PackageListing tests. """
def test_init_package_listing(self):
""" Test the __init__ function of PackageListing. """
create_package_listing(self.session)
pkg = model.Package.by_name(self.session, 'guake')
self.assertEqual(
2,
len(model.PackageListing.by_package_id(
self.session, pkg.id))
)
def test_repr_package_listing(self):
""" Test the __repr__ function of PackageListing. """
create_package_listing(self.session)
pkg = model.Package.by_name(self.session, 'guake')
packages = model.PackageListing.by_package_id(
self.session, pkg.id)
self.assertEqual("PackageListing(id:1, u'pingou', "
"u'Approved', packageid=1, collectionid=2)",
packages[0].__repr__())
def test_search_listing(self):
""" Test the search function of PackageListing. """
create_package_listing(self.session)
collection = model.Collection.by_name(self.session, 'f18')
packages = model.PackageListing.search(self.session,
pkg_name='g%',
clt_id=collection.id,
pkg_owner=None,
pkg_status=None)
self.assertEqual(2, len(packages))
self.assertEqual("PackageListing(id:1, u'pingou', "
"u'Approved', packageid=1, collectionid=2)",
packages[0].__repr__())
packages = model.PackageListing.search(self.session,
pkg_name='g%',
clt_id=collection.id,
pkg_owner='pingou',
pkg_status=None)
self.assertEqual(2, len(packages))
self.assertEqual("PackageListing(id:1, u'pingou', "
"u'Approved', packageid=1, collectionid=2)",
packages[0].__repr__())
self.assertEqual("PackageListing(id:6, u'pingou', "
"u'Approved', packageid=3, collectionid=2)",
packages[1].__repr__())
packages = model.PackageListing.search(self.session,
pkg_name='g%',
clt_id=collection.id,
pkg_owner='pingou',
pkg_status='Approved')
self.assertEqual(2, len(packages))
self.assertEqual("PackageListing(id:1, u'pingou', "
"u'Approved', packageid=1, collectionid=2)",
packages[0].__repr__())
self.assertEqual("PackageListing(id:6, u'pingou', "
"u'Approved', packageid=3, collectionid=2)",
packages[1].__repr__())
packages = model.PackageListing.search(self.session,
pkg_name='g%',
clt_id=collection.id,
pkg_owner='pingou',
pkg_status='Approved',
count=True)
self.assertEqual(2, packages)
packages = model.PackageListing.search(self.session,
pkg_name='g%',
clt_id=collection.id,
pkg_owner='pingou',
pkg_status='Approved',
limit=1)
self.assertEqual("PackageListing(id:1, u'pingou', "
"u'Approved', packageid=1, collectionid=2)",
packages[0].__repr__())
packages = model.PackageListing.search(self.session,
pkg_name='g%',
clt_id=collection.id,
pkg_owner='pingou',
pkg_status='Approved',
critpath=False,
offset=1)
self.assertEqual(len(packages), 1)
self.assertEqual("PackageListing(id:6, u'pingou', "
"u'Approved', packageid=3, collectionid=2)",
packages[0].__repr__())
packages = model.PackageListing.search(self.session,
pkg_name='g%',
clt_id=collection.id,
pkg_owner='pingou',
pkg_status='Approved',
critpath=True,
offset=1)
self.assertEqual(len(packages), 0)
def test_to_json(self):
""" Test the to_json function of PackageListing. """
create_package_listing(self.session)
pkg = model.Package.by_name(self.session, 'guake')
package = model.PackageListing.by_package_id(self.session,
pkg.id)[0]
package = package.to_json()
self.assertEqual(
set(package.keys()),
set(['status', 'package', 'status_change', 'collection',
'point_of_contact', 'critpath']))
def test_search_packagers(self):
""" Test the search_packagers function of PackageListing. """
pkg = model.PackageListing.search_packagers(
self.session, 'pin%')
self.assertEqual(pkg, [])
create_package_acl(self.session)
pkg = model.PackageListing.search_packagers(
self.session, 'pi%')
self.assertEqual(len(pkg), 1)
self.assertEqual(pkg[0][0], 'pingou')
pkg = model.PackageListing.search_packagers(
self.session, 'pi%', count=True)
self.assertEqual(pkg, 1)
pkg = model.PackageListing.search_packagers(
self.session, 'pi%', offset=1)
self.assertEqual(pkg, [])
pkg = model.PackageListing.search_packagers(
self.session, 'pi%', limit=1)
self.assertEqual(len(pkg), 1)
def test_by_collectionid(self):
    """ Test the by_collectionid method of PackageListing. """
    create_package_acl(self.session)

    # Collection 2 == f18
    pkg_list = model.PackageListing.by_collectionid(self.session, 2)
    self.assertEqual(len(pkg_list), 3)
    self.assertEqual(pkg_list[0].collection.branchname, 'f18')
    self.assertEqual(pkg_list[1].collection.branchname, 'f18')
    self.assertEqual(pkg_list[2].collection.branchname, 'f18')

    # Collection 3 == master
    pkg_list = model.PackageListing.by_collectionid(self.session, 3)
    self.assertEqual(len(pkg_list), 4)
    self.assertEqual(pkg_list[0].collection.branchname, 'master')
    self.assertEqual(pkg_list[1].collection.branchname, 'master')
    self.assertEqual(pkg_list[2].collection.branchname, 'master')
    self.assertEqual(pkg_list[3].collection.branchname, 'master')
def test_branch(self):
    """ Test the branch method of PackageListing. """
    create_package_acl(self.session)
    pkg = model.Package.by_name(self.session, 'guake')

    # guake starts out on two branches: f18 and master.
    pkg_list = model.PackageListing.by_package_id(
        self.session, pkg.id)
    self.assertEqual(len(pkg_list), 2)
    self.assertEqual(pkg_list[0].collection.branchname, 'f18')
    self.assertEqual(len(pkg_list[0].acls), 2)
    self.assertEqual(pkg_list[1].collection.branchname, 'master')
    self.assertEqual(len(pkg_list[1].acls), 5)

    # Create a new collection
    new_collection = model.Collection(
        name='Fedora',
        version='19',
        status='Active',
        owner='toshio',
        branchname='f19',
        dist_tag='.fc19',
    )
    self.session.add(new_collection)
    self.session.commit()

    # Branch guake from master to f19
    pkg_list[1].branch(self.session, new_collection)

    # The new f19 listing must carry copies of master's ACLs.
    pkg_list = model.PackageListing.by_package_id(
        self.session, pkg.id)
    self.assertEqual(len(pkg_list), 3)
    self.assertEqual(pkg_list[0].collection.branchname, 'f18')
    self.assertEqual(pkg_list[1].collection.branchname, 'master')
    self.assertEqual(len(pkg_list[1].acls), 5)
    self.assertEqual(pkg_list[2].collection.branchname, 'f19')
    self.assertEqual(len(pkg_list[2].acls), 5)
def test_get_critpath_packages(self):
    """ Test the get_critpath_packages method of PackageListing. """
    create_package_acl(self.session)

    # The base fixtures contain no critpath packages.
    pkg_list = model.PackageListing.get_critpath_packages(self.session)
    self.assertEqual(pkg_list, [])

    pkg_list = model.PackageListing.get_critpath_packages(
        self.session, branch='master')
    self.assertEqual(pkg_list, [])

    create_package_critpath(self.session)

    # Without a branch filter, both critpath listings are returned.
    pkg_list = model.PackageListing.get_critpath_packages(self.session)
    self.assertEqual(len(pkg_list), 2)
    self.assertEqual(
        pkg_list[0].point_of_contact, "kernel-maint")
    self.assertEqual(
        pkg_list[0].collection.branchname, "f18")
    self.assertEqual(
        pkg_list[1].point_of_contact, "group::kernel-maint")
    self.assertEqual(
        pkg_list[1].collection.branchname, "master")

    # Filtering by branch narrows the result to the master listing.
    pkg_list = model.PackageListing.get_critpath_packages(
        self.session, branch='master')
    self.assertEqual(len(pkg_list), 1)
    self.assertEqual(
        pkg_list[0].point_of_contact, "group::kernel-maint")
    self.assertEqual(
        pkg_list[0].collection.branchname, "master")
if __name__ == '__main__':
    # Run only this module's PackageListing tests with verbose output.
    SUITE = unittest.TestLoader().loadTestsFromTestCase(PackageListingtests)
    unittest.TextTestRunner(verbosity=2).run(SUITE)
| gpl-2.0 |
irwinlove/scrapy | scrapy/spidermiddlewares/offsite.py | 56 | 2119 | """
Offsite Spider Middleware
See documentation in docs/topics/spider-middleware.rst
"""
import re
import logging
from scrapy import signals
from scrapy.http import Request
from scrapy.utils.httpobj import urlparse_cached
logger = logging.getLogger(__name__)
class OffsiteMiddleware(object):
    """Spider middleware that drops requests for hosts outside the
    spider's ``allowed_domains``.

    Requests with ``dont_filter`` set always pass through. The first time
    a given offsite domain is filtered it is logged and counted in the
    'offsite/domains' stat; every filtered request bumps 'offsite/filtered'.
    """

    def __init__(self, stats):
        self.stats = stats

    @classmethod
    def from_crawler(cls, crawler):
        mw = cls(crawler.stats)
        crawler.signals.connect(mw.spider_opened, signal=signals.spider_opened)
        return mw

    def process_spider_output(self, response, result, spider):
        for obj in result:
            if not isinstance(obj, Request):
                # Items and other non-request objects pass through untouched.
                yield obj
            elif obj.dont_filter or self.should_follow(obj, spider):
                yield obj
            else:
                domain = urlparse_cached(obj).hostname
                if domain and domain not in self.domains_seen:
                    self.domains_seen.add(domain)
                    logger.debug(
                        "Filtered offsite request to %(domain)r: %(request)s",
                        {'domain': domain, 'request': obj},
                        extra={'spider': spider})
                    self.stats.inc_value('offsite/domains', spider=spider)
                self.stats.inc_value('offsite/filtered', spider=spider)

    def should_follow(self, request, spider):
        # hostname can be None for wrong urls (like javascript links)
        host = urlparse_cached(request).hostname or ''
        return bool(self.host_regex.search(host))

    def get_host_regex(self, spider):
        """Override this method to implement a different offsite policy"""
        allowed_domains = getattr(spider, 'allowed_domains', None)
        if not allowed_domains:
            return re.compile('')  # allow all by default
        escaped = (re.escape(d) for d in allowed_domains if d is not None)
        return re.compile(r'^(.*\.)?(%s)$' % '|'.join(escaped))

    def spider_opened(self, spider):
        self.host_regex = self.get_host_regex(spider)
        self.domains_seen = set()
| bsd-3-clause |
jonyroda97/redbot-amigosprovaveis | lib/pip/_vendor/distlib/util.py | 24 | 59494 | #
# Copyright (C) 2012-2017 The Python Software Foundation.
# See LICENSE.txt and CONTRIBUTORS.txt.
#
import codecs
from collections import deque
import contextlib
import csv
from glob import iglob as std_iglob
import io
import json
import logging
import os
import py_compile
import re
import socket
try:
import ssl
except ImportError: # pragma: no cover
ssl = None
import subprocess
import sys
import tarfile
import tempfile
import textwrap
try:
import threading
except ImportError: # pragma: no cover
import dummy_threading as threading
import time
from . import DistlibException
from .compat import (string_types, text_type, shutil, raw_input, StringIO,
cache_from_source, urlopen, urljoin, httplib, xmlrpclib,
splittype, HTTPHandler, BaseConfigurator, valid_ident,
Container, configparser, URLError, ZipFile, fsdecode,
unquote, urlparse)
logger = logging.getLogger(__name__)

#
# Requirement parsing code as per PEP 508
#

# Regular expressions used by parse_marker()/parse_requirement() below.
IDENTIFIER = re.compile(r'^([\w\.-]+)\s*')            # distribution / marker variable names
VERSION_IDENTIFIER = re.compile(r'^([\w\.*+-]+)\s*')  # version strings (may contain '*')
COMPARE_OP = re.compile(r'^(<=?|>=?|={2,3}|[~!]=)\s*')          # version comparison operators
MARKER_OP = re.compile(r'^((<=?)|(>=?)|={2,3}|[~!]=|in|not\s+in)\s*')  # marker operators
OR = re.compile(r'^or\b\s*')
AND = re.compile(r'^and\b\s*')
NON_SPACE = re.compile(r'(\S+)\s*')
STRING_CHUNK = re.compile(r'([\s\w\.{}()*+#:;,/?!~`@$%^&=|<>\[\]-]+)')
def parse_marker(marker_string):
    """
    Parse a marker string and return a dictionary containing a marker expression.

    The dictionary will contain keys "op", "lhs" and "rhs" for non-terminals in
    the expression grammar, or strings. A string contained in quotes is to be
    interpreted as a literal string, and a string not contained in quotes is a
    variable (such as os_name).

    Implemented as a small recursive-descent parser; each helper consumes
    input from the front of ``remaining`` and returns (result, rest).
    """
    def marker_var(remaining):
        # either identifier, or literal string
        m = IDENTIFIER.match(remaining)
        if m:
            result = m.groups()[0]
            remaining = remaining[m.end():]
        elif not remaining:
            raise SyntaxError('unexpected end of input')
        else:
            # Quoted literal: q is the opening quote, oq the other quote
            # character (which may appear verbatim inside the literal).
            q = remaining[0]
            if q not in '\'"':
                raise SyntaxError('invalid expression: %s' % remaining)
            oq = '\'"'.replace(q, '')
            remaining = remaining[1:]
            parts = [q]
            while remaining:
                # either a string chunk, or oq, or q to terminate
                if remaining[0] == q:
                    break
                elif remaining[0] == oq:
                    parts.append(oq)
                    remaining = remaining[1:]
                else:
                    m = STRING_CHUNK.match(remaining)
                    if not m:
                        raise SyntaxError('error in string literal: %s' % remaining)
                    parts.append(m.groups()[0])
                    remaining = remaining[m.end():]
            else:
                # while-loop exhausted the input without hitting the
                # closing quote.
                s = ''.join(parts)
                raise SyntaxError('unterminated string: %s' % s)
            parts.append(q)
            result = ''.join(parts)
            remaining = remaining[1:].lstrip()  # skip past closing quote
        return result, remaining

    def marker_expr(remaining):
        # Parenthesised sub-expression, or a chain of comparisons which
        # folds left into nested {'op', 'lhs', 'rhs'} dicts.
        if remaining and remaining[0] == '(':
            result, remaining = marker(remaining[1:].lstrip())
            if remaining[0] != ')':
                raise SyntaxError('unterminated parenthesis: %s' % remaining)
            remaining = remaining[1:].lstrip()
        else:
            lhs, remaining = marker_var(remaining)
            while remaining:
                m = MARKER_OP.match(remaining)
                if not m:
                    break
                op = m.groups()[0]
                remaining = remaining[m.end():]
                rhs, remaining = marker_var(remaining)
                lhs = {'op': op, 'lhs': lhs, 'rhs': rhs}
            result = lhs
        return result, remaining

    def marker_and(remaining):
        # expr ('and' expr)*
        lhs, remaining = marker_expr(remaining)
        while remaining:
            m = AND.match(remaining)
            if not m:
                break
            remaining = remaining[m.end():]
            rhs, remaining = marker_expr(remaining)
            lhs = {'op': 'and', 'lhs': lhs, 'rhs': rhs}
        return lhs, remaining

    def marker(remaining):
        # and-expr ('or' and-expr)*  -- 'or' binds loosest.
        lhs, remaining = marker_and(remaining)
        while remaining:
            m = OR.match(remaining)
            if not m:
                break
            remaining = remaining[m.end():]
            rhs, remaining = marker_and(remaining)
            lhs = {'op': 'or', 'lhs': lhs, 'rhs': rhs}
        return lhs, remaining

    return marker(marker_string)
def parse_requirement(req):
    """
    Parse a requirement passed in as a string. Return a Container
    whose attributes contain the various parts of the requirement.

    Attributes of the result: name, extras, constraints (list of
    (op, version) tuples or None), marker, url, requirement (a
    normalised "name op version, ..." string). Returns None for blank
    lines and comments.
    """
    remaining = req.strip()
    if not remaining or remaining.startswith('#'):
        return None
    m = IDENTIFIER.match(remaining)
    if not m:
        raise SyntaxError('name expected: %s' % remaining)
    distname = m.groups()[0]
    remaining = remaining[m.end():]
    extras = mark_expr = versions = uri = None
    if remaining and remaining[0] == '[':
        # Parse the comma-separated [extra1,extra2,...] section.
        i = remaining.find(']', 1)
        if i < 0:
            raise SyntaxError('unterminated extra: %s' % remaining)
        s = remaining[1:i]
        remaining = remaining[i + 1:].lstrip()
        extras = []
        while s:
            m = IDENTIFIER.match(s)
            if not m:
                raise SyntaxError('malformed extra: %s' % s)
            extras.append(m.groups()[0])
            s = s[m.end():]
            if not s:
                break
            if s[0] != ',':
                raise SyntaxError('comma expected in extras: %s' % s)
            s = s[1:].lstrip()
        if not extras:
            extras = None
    if remaining:
        if remaining[0] == '@':
            # it's a URI
            remaining = remaining[1:].lstrip()
            m = NON_SPACE.match(remaining)
            if not m:
                raise SyntaxError('invalid URI: %s' % remaining)
            uri = m.groups()[0]
            t = urlparse(uri)
            # there are issues with Python and URL parsing, so this test
            # is a bit crude. See bpo-20271, bpo-23505. Python doesn't
            # always parse invalid URLs correctly - it should raise
            # exceptions for malformed URLs
            if not (t.scheme and t.netloc):
                raise SyntaxError('Invalid URL: %s' % uri)
            remaining = remaining[m.end():].lstrip()
        else:

            def get_versions(ver_remaining):
                """
                Return a list of operator, version tuples if any are
                specified, else None.
                """
                m = COMPARE_OP.match(ver_remaining)
                versions = None
                if m:
                    versions = []
                    while True:
                        op = m.groups()[0]
                        ver_remaining = ver_remaining[m.end():]
                        m = VERSION_IDENTIFIER.match(ver_remaining)
                        if not m:
                            raise SyntaxError('invalid version: %s' % ver_remaining)
                        v = m.groups()[0]
                        versions.append((op, v))
                        ver_remaining = ver_remaining[m.end():]
                        if not ver_remaining or ver_remaining[0] != ',':
                            break
                        ver_remaining = ver_remaining[1:].lstrip()
                        m = COMPARE_OP.match(ver_remaining)
                        if not m:
                            raise SyntaxError('invalid constraint: %s' % ver_remaining)
                    if not versions:
                        versions = None
                return versions, ver_remaining

            if remaining[0] != '(':
                versions, remaining = get_versions(remaining)
            else:
                # Legacy parenthesised constraints: "(>= 1.0, < 2.0)" or
                # a bare version "(1.0)".
                i = remaining.find(')', 1)
                if i < 0:
                    raise SyntaxError('unterminated parenthesis: %s' % remaining)
                s = remaining[1:i]
                remaining = remaining[i + 1:].lstrip()
                # As a special diversion from PEP 508, allow a version number
                # a.b.c in parentheses as a synonym for ~= a.b.c (because this
                # is allowed in earlier PEPs)
                if COMPARE_OP.match(s):
                    versions, _ = get_versions(s)
                else:
                    m = VERSION_IDENTIFIER.match(s)
                    if not m:
                        raise SyntaxError('invalid constraint: %s' % s)
                    v = m.groups()[0]
                    s = s[m.end():].lstrip()
                    if s:
                        raise SyntaxError('invalid constraint: %s' % s)
                    versions = [('~=', v)]

    if remaining:
        # Anything after ';' is an environment marker expression.
        if remaining[0] != ';':
            raise SyntaxError('invalid requirement: %s' % remaining)
        remaining = remaining[1:].lstrip()
        mark_expr, remaining = parse_marker(remaining)

    if remaining and remaining[0] != '#':
        raise SyntaxError('unexpected trailing data: %s' % remaining)

    if not versions:
        rs = distname
    else:
        rs = '%s %s' % (distname, ', '.join(['%s %s' % con for con in versions]))
    return Container(name=distname, extras=extras, constraints=versions,
                     marker=mark_expr, url=uri, requirement=rs)
def get_resources_dests(resources_root, rules):
    """Find destinations for resources files.

    :param resources_root: Directory under which the resource files live.
    :param rules: Iterable of (base, glob-suffix, dest) triples. A ``dest``
                  of None removes any previously-matched entries.
    :return: Mapping of resource file path (relative to resources_root,
             '/'-separated) to its destination path.
    """
    def get_rel_path(root, path):
        # normalizes and returns a lstripped-/-separated path
        root = root.replace(os.path.sep, '/')
        path = path.replace(os.path.sep, '/')
        assert path.startswith(root)
        return path[len(root):].lstrip('/')

    destinations = {}
    for base, suffix, dest in rules:
        # Rules are applied in order, so later rules can override or
        # (with dest=None) remove entries produced by earlier ones.
        prefix = os.path.join(resources_root, base)
        for abs_base in iglob(prefix):
            abs_glob = os.path.join(abs_base, suffix)
            for abs_path in iglob(abs_glob):
                resource_file = get_rel_path(resources_root, abs_path)
                if dest is None:  # remove the entry if it was here
                    destinations.pop(resource_file, None)
                else:
                    rel_path = get_rel_path(abs_base, abs_path)
                    rel_dest = dest.replace(os.path.sep, '/').rstrip('/')
                    destinations[resource_file] = rel_dest + '/' + rel_path
    return destinations
def in_venv():
    """Return True when running inside a virtual environment.

    Detects classic virtualenv (which sets ``sys.real_prefix``) as well as
    PEP 405 venvs (where ``sys.prefix`` differs from ``sys.base_prefix``).
    """
    if hasattr(sys, 'real_prefix'):
        # virtualenv-created environment
        return True
    # PEP 405 venv: prefix differs from the base installation's prefix.
    return sys.prefix != getattr(sys, 'base_prefix', sys.prefix)
def get_executable():
    """Return the path of the running Python interpreter as text.

    The path is case-normalised; if the platform reports a bytes path it
    is decoded with ``fsdecode``.
    """
    # NOTE: the historical __PYVENV_LAUNCHER__ workaround for OS X is no
    # longer needed, as changes to the stub launcher mean sys.executable
    # always points to the stub itself.
    exe = os.path.normcase(sys.executable)
    if not isinstance(exe, text_type):
        exe = fsdecode(exe)
    return exe
def proceed(prompt, allowed_chars, error_prompt=None, default=None):
    """Prompt interactively until the user types an allowed character.

    :param prompt: Text shown to the user.
    :param allowed_chars: Characters accepted as an answer (lower case).
    :param error_prompt: Optional text prefixed to the prompt after an
                         invalid answer.
    :param default: Answer assumed when the user just presses Enter.
    :return: The accepted (lower-cased) character.
    """
    current = prompt
    while True:
        answer = raw_input(current)
        current = prompt
        if not answer and default:
            answer = default
        if answer:
            c = answer[0].lower()
            if c in allowed_chars:
                break
            if error_prompt:
                # Re-prompt, explaining why the previous answer was rejected.
                current = '%c: %s\n%s' % (c, error_prompt, prompt)
    return c
def extract_by_key(d, keys):
    """Return a new dict with only the entries of *d* whose key is in *keys*.

    ``keys`` may be an iterable of keys or a single whitespace-separated
    string of key names; keys missing from *d* are silently ignored.
    """
    if isinstance(keys, string_types):
        keys = keys.split()
    return {key: d[key] for key in keys if key in d}
def read_exports(stream):
    """
    Read export (entry-point) definitions from *stream*, which may contain
    either JSON metadata or a legacy INI-style exports file.

    :return: Mapping of group name -> {export name: ExportEntry}.
    """
    if sys.version_info[0] >= 3:
        # needs to be a text stream
        stream = codecs.getreader('utf-8')(stream)
    # Try to load as JSON, falling back on legacy format
    data = stream.read()
    stream = StringIO(data)
    try:
        jdata = json.load(stream)
        result = jdata['extensions']['python.exports']['exports']
        # Convert the raw "name = callable" strings into ExportEntry objects.
        for group, entries in result.items():
            for k, v in entries.items():
                s = '%s = %s' % (k, v)
                entry = get_export_entry(s)
                assert entry is not None
                entries[k] = entry
        return result
    except Exception:
        # Not JSON - rewind and parse as INI below.
        stream.seek(0, 0)

    def read_stream(cp, stream):
        # read_file is the modern name; readfp the Python 2 one.
        if hasattr(cp, 'read_file'):
            cp.read_file(stream)
        else:
            cp.readfp(stream)

    cp = configparser.ConfigParser()
    try:
        read_stream(cp, stream)
    except configparser.MissingSectionHeaderError:
        # Possibly an indented legacy file - dedent and retry once.
        stream.close()
        data = textwrap.dedent(data)
        stream = StringIO(data)
        read_stream(cp, stream)

    result = {}
    for key in cp.sections():
        result[key] = entries = {}
        for name, value in cp.items(key):
            s = '%s = %s' % (name, value)
            entry = get_export_entry(s)
            assert entry is not None
            #entry.dist = self
            entries[name] = entry
    return result
def write_exports(exports, stream):
    """
    Write *exports* (mapping of group -> {name: ExportEntry}) to *stream*
    in the legacy INI format understood by :func:`read_exports`.
    """
    if sys.version_info[0] >= 3:
        # needs to be a text stream
        stream = codecs.getwriter('utf-8')(stream)
    cp = configparser.ConfigParser()
    for k, v in exports.items():
        # TODO check k, v for valid values
        cp.add_section(k)
        for entry in v.values():
            # Reassemble "prefix:suffix [flags]" from the entry's parts.
            if entry.suffix is None:
                s = entry.prefix
            else:
                s = '%s:%s' % (entry.prefix, entry.suffix)
            if entry.flags:
                s = '%s [%s]' % (s, ', '.join(entry.flags))
            cp.set(k, entry.name, s)
    cp.write(stream)
@contextlib.contextmanager
def tempdir():
    """Context manager yielding a fresh temporary directory.

    The directory and its whole contents are removed on exit, even when
    the body raises.
    """
    path = tempfile.mkdtemp()
    try:
        yield path
    finally:
        shutil.rmtree(path)
@contextlib.contextmanager
def chdir(d):
    """Context manager that runs its body with *d* as the working directory.

    The previous working directory is restored on exit, even when the
    body raises.
    """
    original = os.getcwd()
    try:
        os.chdir(d)
        yield
    finally:
        os.chdir(original)
@contextlib.contextmanager
def socket_timeout(seconds=15):
    """Temporarily set the process-wide default socket timeout to *seconds*.

    The previous default timeout is restored on exit, even when the body
    raises.
    """
    saved = socket.getdefaulttimeout()
    try:
        socket.setdefaulttimeout(seconds)
        yield
    finally:
        socket.setdefaulttimeout(saved)
class cached_property(object):
    """Non-data descriptor that caches the wrapped method's result.

    On first access the wrapped function is called once and its value is
    stored on the instance under the same attribute name; because this is
    a non-data descriptor (no ``__set__``), later lookups find the
    instance attribute directly and never call the function again.
    """

    def __init__(self, func):
        self.func = func

    def __get__(self, obj, cls=None):
        # Class-level access returns the descriptor itself.
        if obj is None:
            return self
        value = self.func(obj)
        # Store via object.__setattr__ so instances with a custom
        # __setattr__ cannot interfere with the caching.
        object.__setattr__(obj, self.func.__name__, value)
        return value
def convert_path(pathname):
    """Return 'pathname' as a name that will work on the native filesystem.

    The path is split on '/' and put back together again using the current
    directory separator. Needed because filenames in the setup script are
    always supplied in Unix style, and have to be converted to the local
    convention before we can actually use them in the filesystem. Raises
    ValueError on non-Unix-ish systems if 'pathname' either starts or
    ends with a slash.
    """
    if os.sep == '/':
        # Already in native form.
        return pathname
    if not pathname:
        return pathname
    if pathname.startswith('/'):
        raise ValueError("path '%s' cannot be absolute" % pathname)
    if pathname.endswith('/'):
        raise ValueError("path '%s' cannot end with '/'" % pathname)
    # Drop any '.' components before joining with the native separator.
    components = [c for c in pathname.split('/') if c != os.curdir]
    if not components:
        return os.curdir
    return os.path.join(*components)
class FileOperator(object):
    """
    Perform file-system operations (copy, write, byte-compile, remove)
    while honouring a dry-run flag, and optionally recording every file
    written and directory created so that changes can later be committed
    or rolled back.
    """

    def __init__(self, dry_run=False):
        # When dry_run is True, operations are logged but not performed.
        self.dry_run = dry_run
        self.ensured = set()    # directories already known/made to exist
        self._init_record()

    def _init_record(self):
        # Reset change tracking; self.record stays off until enabled
        # externally by a caller that wants commit/rollback semantics.
        self.record = False
        self.files_written = set()
        self.dirs_created = set()

    def record_as_written(self, path):
        # Track *path* for rollback() only while recording is on.
        if self.record:
            self.files_written.add(path)

    def newer(self, source, target):
        """Tell if the target is newer than the source.

        Returns true if 'source' exists and is more recently modified than
        'target', or if 'source' exists and 'target' doesn't.

        Returns false if both exist and 'target' is the same age or younger
        than 'source'. Raise PackagingFileError if 'source' does not exist.

        Note that this test is not very accurate: files created in the same
        second will have the same "age".
        """
        if not os.path.exists(source):
            raise DistlibException("file '%r' does not exist" %
                                   os.path.abspath(source))
        if not os.path.exists(target):
            return True

        return os.stat(source).st_mtime > os.stat(target).st_mtime

    def copy_file(self, infile, outfile, check=True):
        """Copy a file respecting dry-run and force flags.
        """
        self.ensure_dir(os.path.dirname(outfile))
        logger.info('Copying %s to %s', infile, outfile)
        if not self.dry_run:
            msg = None
            if check:
                # Refuse to clobber symlinks or non-regular files.
                if os.path.islink(outfile):
                    msg = '%s is a symlink' % outfile
                elif os.path.exists(outfile) and not os.path.isfile(outfile):
                    msg = '%s is a non-regular file' % outfile
            if msg:
                raise ValueError(msg + ' which would be overwritten')
            shutil.copyfile(infile, outfile)
        self.record_as_written(outfile)

    def copy_stream(self, instream, outfile, encoding=None):
        # Copy the contents of a readable stream to the file at *outfile*;
        # encoding=None means a binary copy.
        assert not os.path.isdir(outfile)
        self.ensure_dir(os.path.dirname(outfile))
        logger.info('Copying stream %s to %s', instream, outfile)
        if not self.dry_run:
            if encoding is None:
                outstream = open(outfile, 'wb')
            else:
                outstream = codecs.open(outfile, 'w', encoding=encoding)
            try:
                shutil.copyfileobj(instream, outstream)
            finally:
                outstream.close()
        self.record_as_written(outfile)

    def write_binary_file(self, path, data):
        # Write raw bytes, creating parent directories as needed.
        self.ensure_dir(os.path.dirname(path))
        if not self.dry_run:
            with open(path, 'wb') as f:
                f.write(data)
        self.record_as_written(path)

    def write_text_file(self, path, data, encoding):
        # Write text encoded with *encoding* (binary mode keeps line
        # endings exactly as supplied).
        self.ensure_dir(os.path.dirname(path))
        if not self.dry_run:
            with open(path, 'wb') as f:
                f.write(data.encode(encoding))
        self.record_as_written(path)

    def set_mode(self, bits, mask, files):
        # No-op on platforms without POSIX-style permission bits.
        if os.name == 'posix' or (os.name == 'java' and os._name == 'posix'):
            # Set the executable bits (owner, group, and world) on
            # all the files specified.
            for f in files:
                if self.dry_run:
                    logger.info("changing mode of %s", f)
                else:
                    mode = (os.stat(f).st_mode | bits) & mask
                    logger.info("changing mode of %s to %o", f, mode)
                    os.chmod(f, mode)

    # Convenience wrapper: mark files executable (r-x for all, masked).
    set_executable_mode = lambda s, f: s.set_mode(0o555, 0o7777, f)

    def ensure_dir(self, path):
        # Recursively create *path*, remembering what was created so it
        # can be removed on rollback.
        path = os.path.abspath(path)
        if path not in self.ensured and not os.path.exists(path):
            self.ensured.add(path)
            d, f = os.path.split(path)
            self.ensure_dir(d)
            logger.info('Creating %s' % path)
            if not self.dry_run:
                os.mkdir(path)
            if self.record:
                self.dirs_created.add(path)

    def byte_compile(self, path, optimize=False, force=False, prefix=None):
        # Byte-compile *path* to its cache location; *prefix* is stripped
        # from the path used in compile error messages.
        dpath = cache_from_source(path, not optimize)
        logger.info('Byte-compiling %s to %s', path, dpath)
        if not self.dry_run:
            if force or self.newer(path, dpath):
                if not prefix:
                    diagpath = None
                else:
                    assert path.startswith(prefix)
                    diagpath = path[len(prefix):]
            py_compile.compile(path, dpath, diagpath, True)     # raise error
        self.record_as_written(dpath)
        return dpath

    def ensure_removed(self, path):
        # Remove a file, link or directory tree if present, updating the
        # recorded sets so commit/rollback stay consistent.
        if os.path.exists(path):
            if os.path.isdir(path) and not os.path.islink(path):
                logger.debug('Removing directory tree at %s', path)
                if not self.dry_run:
                    shutil.rmtree(path)
                if self.record:
                    if path in self.dirs_created:
                        self.dirs_created.remove(path)
            else:
                if os.path.islink(path):
                    s = 'link'
                else:
                    s = 'file'
                logger.debug('Removing %s %s', s, path)
                if not self.dry_run:
                    os.remove(path)
                if self.record:
                    if path in self.files_written:
                        self.files_written.remove(path)

    def is_writable(self, path):
        # Walk up from *path* to the nearest existing ancestor and test
        # write access there.
        result = False
        while not result:
            if os.path.exists(path):
                result = os.access(path, os.W_OK)
                break
            parent = os.path.dirname(path)
            if parent == path:
                break
            path = parent
        return result

    def commit(self):
        """
        Commit recorded changes, turn off recording, return
        changes.
        """
        assert self.record
        result = self.files_written, self.dirs_created
        self._init_record()
        return result

    def rollback(self):
        # Undo everything recorded since recording was switched on:
        # remove written files first, then the created directories.
        if not self.dry_run:
            for f in list(self.files_written):
                if os.path.exists(f):
                    os.remove(f)
            # dirs should all be empty now, except perhaps for
            # __pycache__ subdirs
            # reverse so that subdirs appear before their parents
            dirs = sorted(self.dirs_created, reverse=True)
            for d in dirs:
                flist = os.listdir(d)
                if flist:
                    assert flist == ['__pycache__']
                    sd = os.path.join(d, flist[0])
                    os.rmdir(sd)
                os.rmdir(d)     # should fail if non-empty
        self._init_record()
def resolve(module_name, dotted_path):
    """
    Import ``module_name`` and resolve ``dotted_path`` against it.

    :param module_name: Importable (possibly dotted) module name.
    :param dotted_path: Dotted attribute path within the module, or None
                        to return the module itself.
    :return: The resolved object.
    :raises ImportError: if the module cannot be imported.
    :raises AttributeError: if any attribute in the path is missing.
    """
    if module_name in sys.modules:
        mod = sys.modules[module_name]
    else:
        # __import__('a.b') returns the *top-level* package 'a', which
        # broke attribute resolution for dotted modules that were not
        # already imported. After a successful import the submodule is
        # guaranteed to be in sys.modules, so fetch it from there.
        __import__(module_name)
        mod = sys.modules[module_name]
    if dotted_path is None:
        result = mod
    else:
        parts = dotted_path.split('.')
        result = getattr(mod, parts.pop(0))
        for p in parts:
            result = getattr(result, p)
    return result
class ExportEntry(object):
    """
    A parsed export (entry-point) specification of the form
    ``name = prefix:suffix [flag, ...]``.
    """

    def __init__(self, name, prefix, suffix, flags):
        self.name = name        # the exported name
        self.prefix = prefix    # dotted module path
        self.suffix = suffix    # attribute path within the module, or None
        self.flags = flags      # list of flag strings from the [...] part

    @cached_property
    def value(self):
        # Resolved lazily (and cached) because importing can be expensive
        # and may have side effects.
        return resolve(self.prefix, self.suffix)

    def __repr__(self):  # pragma: no cover
        return '<ExportEntry %s = %s:%s %s>' % (self.name, self.prefix,
                                                self.suffix, self.flags)

    def __eq__(self, other):
        # Value equality over all four components; non-ExportEntry
        # comparisons are simply unequal.
        if not isinstance(other, ExportEntry):
            result = False
        else:
            result = (self.name == other.name and
                      self.prefix == other.prefix and
                      self.suffix == other.suffix and
                      self.flags == other.flags)
        return result

    # Keep identity-based hashing despite defining __eq__.
    __hash__ = object.__hash__
# Grammar for an export specification:
#     name = callable [flag, flag=value, ...]
# (whitespace inside the pattern is insignificant under re.VERBOSE)
ENTRY_RE = re.compile(r'''(?P<name>(\w|[-.+])+)
                      \s*=\s*(?P<callable>(\w+)([:\.]\w+)*)
                      \s*(\[\s*(?P<flags>\w+(=\w+)?(,\s*\w+(=\w+)?)*)\s*\])?
                      ''', re.VERBOSE)


def get_export_entry(specification):
    """
    Parse *specification* ('name = prefix:suffix [flags]') and return an
    :class:`ExportEntry`, or None when the string does not look like an
    export at all.

    :raises DistlibException: for malformed specifications (stray
        brackets, or more than one ':' in the callable part).
    """
    m = ENTRY_RE.search(specification)
    if not m:
        result = None
        # A failed match containing brackets is malformed rather than
        # simply "not an export".
        if '[' in specification or ']' in specification:
            raise DistlibException("Invalid specification "
                                   "'%s'" % specification)
    else:
        d = m.groupdict()
        name = d['name']
        path = d['callable']
        colons = path.count(':')
        if colons == 0:
            prefix, suffix = path, None
        else:
            if colons != 1:
                raise DistlibException("Invalid specification "
                                       "'%s'" % specification)
            prefix, suffix = path.split(':')
        flags = d['flags']
        if flags is None:
            # Brackets present but not matched as flags -> malformed.
            if '[' in specification or ']' in specification:
                raise DistlibException("Invalid specification "
                                       "'%s'" % specification)
            flags = []
        else:
            flags = [f.strip() for f in flags.split(',')]
        result = ExportEntry(name, prefix, suffix, flags)
    return result
def get_cache_base(suffix=None):
    """
    Return the default base location for distlib caches. If the directory does
    not exist, it is created. Use the suffix provided for the base directory,
    and default to '.distlib' if it isn't provided.

    On Windows, if LOCALAPPDATA is defined in the environment, then it is
    assumed to be a directory, and will be the parent directory of the result.
    On POSIX, and on Windows if LOCALAPPDATA is not defined, the user's home
    directory - using os.expanduser('~') - will be the parent directory of
    the result.

    The result is just the directory '.distlib' in the parent directory as
    determined above, or with the name specified with ``suffix``.
    """
    if suffix is None:
        suffix = '.distlib'
    if os.name == 'nt' and 'LOCALAPPDATA' in os.environ:
        result = os.path.expandvars('$localappdata')
    else:
        # Assume posix, or old Windows
        result = os.path.expanduser('~')
    # we use 'isdir' instead of 'exists', because we want to
    # fail if there's a file with that name
    if os.path.isdir(result):
        usable = os.access(result, os.W_OK)
        if not usable:
            logger.warning('Directory exists but is not writable: %s', result)
    else:
        try:
            os.makedirs(result)
            usable = True
        except OSError:
            logger.warning('Unable to create %s', result, exc_info=True)
            usable = False
    if not usable:
        # Fall back to a throwaway temporary directory rather than fail.
        result = tempfile.mkdtemp()
        logger.warning('Default location unusable, using %s', result)
    return os.path.join(result, suffix)
def path_to_cache_dir(path):
    """
    Convert an absolute path to a directory name for use in a cache.

    The algorithm used is:

    #. On Windows, any ``':'`` in the drive is replaced with ``'---'``.
    #. Any occurrence of ``os.sep`` is replaced with ``'--'``.
    #. ``'.cache'`` is appended.
    """
    drive, rest = os.path.splitdrive(os.path.abspath(path))
    if drive:
        drive = drive.replace(':', '---')
    return drive + rest.replace(os.sep, '--') + '.cache'
def ensure_slash(s):
    """Return *s*, appending a trailing '/' if it does not already end in one."""
    return s if s.endswith('/') else s + '/'
def parse_credentials(netloc):
    """
    Split a ``user:password@host`` network location into its parts.

    :param netloc: The netloc portion of a URL.
    :return: A (username, password, host) tuple; username and/or password
             are None when absent.
    """
    username = password = None
    if '@' in netloc:
        # rsplit, not split: the password itself may contain '@', and
        # only the *last* '@' separates the credentials from the host.
        prefix, netloc = netloc.rsplit('@', 1)
        if ':' not in prefix:
            username = prefix
        else:
            username, password = prefix.split(':', 1)
    return username, password, netloc
def get_process_umask():
    """Return the current process umask without permanently changing it.

    The only portable way to *read* the umask is to set it and immediately
    restore the previous value.
    """
    current = os.umask(0o22)
    os.umask(current)
    return current
def is_string_sequence(seq):
    """
    Return True if every element of *seq* is a string, False otherwise.

    An empty sequence is vacuously string-only and returns True. (The
    previous implementation used ``assert`` and raised AssertionError on
    empty input, and the assert vanished entirely under ``python -O``.)
    """
    return all(isinstance(item, string_types) for item in seq)
PROJECT_NAME_AND_VERSION = re.compile('([a-z0-9_]+([.-][a-z_][a-z0-9_]*)*)-'
                                      '([a-z0-9_.+-]+)', re.I)
PYTHON_VERSION = re.compile(r'-py(\d\.?\d?)')


def split_filename(filename, project_name=None):
    """
    Extract name, version and python version from a filename (no extension).

    :param filename: The filename stem, URL-quoted chars allowed.
    :param project_name: Optional known project name, used to split the
                         name off exactly when it matches.
    :return: A (name, version, pyver) tuple, or None if nothing matched.
    """
    pyver = None
    filename = unquote(filename).replace(' ', '-')
    # Strip a trailing "-pyX.Y" marker first, remembering the version.
    m = PYTHON_VERSION.search(filename)
    if m:
        pyver = m.group(1)
        filename = filename[:m.start()]
    # Prefer an exact split on the supplied project name when possible.
    if project_name and len(filename) > len(project_name) + 1:
        m = re.match(re.escape(project_name) + r'\b', filename)
        if m:
            n = m.end()
            return filename[:n], filename[n + 1:], pyver
    # Fall back to the generic name-version pattern.
    m = PROJECT_NAME_AND_VERSION.match(filename)
    if m:
        return m.group(1), m.group(3), pyver
    return None
# Allow spaces in name because of legacy dists like "Twisted Core"
NAME_VERSION_RE = re.compile(r'(?P<name>[\w .-]+)\s*'
                             r'\(\s*(?P<ver>[^\s)]+)\)$')


def parse_name_and_version(p):
    """
    Extract (name, version) from a string such as ``'foo (1.0)'`` (the
    form used e.g. in Provides-Dist values).

    The name is stripped and lower-cased.

    :param p: A value in a form 'foo (1.0)'
    :return: The name and version as a tuple.
    :raises DistlibException: when *p* does not match the expected form.
    """
    matched = NAME_VERSION_RE.match(p)
    if matched is None:
        raise DistlibException('Ill-formed name/version string: \'%s\'' % p)
    groups = matched.groupdict()
    return groups['name'].strip().lower(), groups['ver']
def get_extras(requested, available):
    """
    Resolve a collection of requested extras against those available.

    ``'*'`` expands to every available extra; a leading ``'-'`` removes an
    extra from the result (a lone ``'-'`` is kept literally). Extras not
    declared as available are logged as warnings.

    :return: The resolved set of extras.
    """
    result = set()
    requested = set(requested or [])
    available = set(available or [])
    if '*' in requested:
        requested.remove('*')
        result |= available
    for extra in requested:
        if extra == '-':
            result.add(extra)
        elif extra.startswith('-'):
            unwanted = extra[1:]
            if unwanted not in available:
                logger.warning('undeclared extra: %s' % unwanted)
            # Remove only if present (discard == remove-if-present).
            result.discard(unwanted)
        else:
            if extra not in available:
                logger.warning('undeclared extra: %s' % extra)
            result.add(extra)
    return result
#
# Extended metadata functionality
#
def _get_external_data(url):
    """
    Fetch JSON metadata from *url*; return the decoded object, or an
    empty dict on any failure (network error, non-JSON response, ...).
    """
    result = {}
    try:
        # urlopen might fail if it runs into redirections,
        # because of Python issue #13696. Fixed in locators
        # using a custom redirect handler.
        resp = urlopen(url)
        headers = resp.info()
        ct = headers.get('Content-Type')
        if not ct.startswith('application/json'):
            logger.debug('Unexpected response for JSON request: %s', ct)
        else:
            # Decode the byte stream as UTF-8 while parsing.
            reader = codecs.getreader('utf-8')(resp)
            #data = reader.read().decode('utf-8')
            #result = json.loads(data)
            result = json.load(reader)
    except Exception as e:
        logger.exception('Failed to get external data for %s: %s', url, e)
    return result
_external_data_base_url = 'https://www.red-dove.com/pypi/projects/'


def get_project_data(name):
    """Fetch the external JSON project metadata for *name*.

    Performs a network request; returns {} on failure.
    """
    rel = '%s/%s/project.json' % (name[0].upper(), name)
    return _get_external_data(urljoin(_external_data_base_url, rel))
def get_package_data(name, version):
    """Fetch the external JSON metadata for a specific release of *name*.

    Performs a network request; returns {} on failure.
    """
    rel = '%s/%s/package-%s.json' % (name[0].upper(), name, version)
    return _get_external_data(urljoin(_external_data_base_url, rel))
class Cache(object):
    """
    A class implementing a cache for resources that need to live in the file
    system, e.g. shared libraries. This class was moved from resources to
    here because it could be used by other modules, e.g. the wheel module.
    """

    def __init__(self, base):
        """
        Initialise an instance.

        :param base: The base directory where the cache should be located.
        """
        # 'isdir' rather than 'exists': a plain file at this path is an error.
        if not os.path.isdir(base):  # pragma: no cover
            os.makedirs(base)
        # Warn when group/other permission bits are set on the directory.
        if (os.stat(base).st_mode & 0o77) != 0:
            logger.warning('Directory \'%s\' is not private', base)
        self.base = os.path.abspath(os.path.normpath(base))

    def prefix_to_dir(self, prefix):
        """
        Converts a resource prefix to a directory name in the cache.
        """
        return path_to_cache_dir(prefix)

    def clear(self):
        """
        Clear the cache, returning a list of entries that could not be removed.
        """
        failures = []
        for name in os.listdir(self.base):
            entry = os.path.join(self.base, name)
            try:
                if os.path.islink(entry) or os.path.isfile(entry):
                    os.remove(entry)
                elif os.path.isdir(entry):
                    shutil.rmtree(entry)
            except Exception:
                failures.append(entry)
        return failures
class EventMixin(object):
    """
    A very simple publish/subscribe system.
    """

    def __init__(self):
        self._subscribers = {}

    def add(self, event, subscriber, append=True):
        """
        Add a subscriber for an event.

        :param event: The name of an event.
        :param subscriber: The subscriber to be added (and called when the
                           event is published).
        :param append: Whether to append or prepend the subscriber to an
                       existing subscriber list for the event.
        """
        listeners = self._subscribers
        if event in listeners:
            if append:
                listeners[event].append(subscriber)
            else:
                listeners[event].appendleft(subscriber)
        else:
            # deque gives cheap appendleft for prepended subscribers.
            listeners[event] = deque([subscriber])

    def remove(self, event, subscriber):
        """
        Remove a subscriber for an event.

        :param event: The name of an event.
        :param subscriber: The subscriber to be removed.
        """
        listeners = self._subscribers
        if event not in listeners:
            raise ValueError('No subscribers: %r' % event)
        listeners[event].remove(subscriber)

    def get_subscribers(self, event):
        """
        Return an iterator for the subscribers for an event.

        :param event: The event to return subscribers for.
        """
        return iter(self._subscribers.get(event, ()))

    def publish(self, event, *args, **kwargs):
        """
        Publish a event and return a list of values returned by its
        subscribers.

        :param event: The event to publish.
        :param args: The positional arguments to pass to the event's
                     subscribers.
        :param kwargs: The keyword arguments to pass to the event's
                       subscribers.
        """
        results = []
        for listener in self.get_subscribers(event):
            try:
                outcome = listener(event, *args, **kwargs)
            except Exception:
                # A failing subscriber contributes None rather than
                # aborting the publication.
                logger.exception('Exception during event publication')
                outcome = None
            results.append(outcome)
        logger.debug('publish %s: args = %s, kwargs = %s, result = %s',
                     event, args, kwargs, results)
        return results
#
# Simple sequencing
#
class Sequencer(object):
    """
    A directed graph of pred -> succ dependency edges, plus isolated
    nodes, from which a linear ordering of steps can be derived
    (see :meth:`get_steps`).
    """
    def __init__(self):
        # succ -> set of its predecessors
        self._preds = {}
        # pred -> set of its successors
        self._succs = {}
        self._nodes = set()     # nodes with no preds/succs

    def add_node(self, node):
        # Register a node that participates in no edges (yet).
        self._nodes.add(node)

    def remove_node(self, node, edges=False):
        # Remove an isolated node; with edges=True, also remove every
        # edge that touches `node` and drop any now-empty edge sets.
        if node in self._nodes:
            self._nodes.remove(node)
        if edges:
            # Copy the sets: self.remove() mutates them while we iterate.
            for p in set(self._preds.get(node, ())):
                self.remove(p, node)
            for s in set(self._succs.get(node, ())):
                self.remove(node, s)
            # Remove empties
            for k, v in list(self._preds.items()):
                if not v:
                    del self._preds[k]
            for k, v in list(self._succs.items()):
                if not v:
                    del self._succs[k]

    def add(self, pred, succ):
        # Record the dependency edge pred -> succ (self-edges disallowed).
        assert pred != succ
        self._preds.setdefault(succ, set()).add(pred)
        self._succs.setdefault(pred, set()).add(succ)

    def remove(self, pred, succ):
        # Remove a previously added pred -> succ edge; raises ValueError
        # if the edge was never recorded.
        assert pred != succ
        try:
            preds = self._preds[succ]
            succs = self._succs[pred]
        except KeyError:  # pragma: no cover
            raise ValueError('%r not a successor of anything' % succ)
        try:
            preds.remove(pred)
            succs.remove(succ)
        except KeyError:  # pragma: no cover
            raise ValueError('%r not a successor of %r' % (succ, pred))

    def is_step(self, step):
        # A step is known if it appears in any edge or as an isolated node.
        return (step in self._preds or step in self._succs or
                step in self._nodes)

    def get_steps(self, final):
        # Return an iterator over the steps needed to reach `final`,
        # ordered so predecessors come before their successors
        # (breadth-first walk over predecessors, then reversed).
        if not self.is_step(final):
            raise ValueError('Unknown: %r' % final)
        result = []
        todo = []
        seen = set()
        todo.append(final)
        while todo:
            step = todo.pop(0)
            if step in seen:
                # if a step was already seen,
                # move it to the end (so it will appear earlier
                # when reversed on return) ... but not for the
                # final step, as that would be confusing for
                # users
                if step != final:
                    result.remove(step)
                    result.append(step)
            else:
                seen.add(step)
                result.append(step)
                preds = self._preds.get(step, ())
                todo.extend(preds)
        return reversed(result)

    @property
    def strong_connections(self):
        # Tarjan's algorithm over the successor graph; returns a list of
        # tuples, one per strongly connected component.
        #http://en.wikipedia.org/wiki/Tarjan%27s_strongly_connected_components_algorithm
        index_counter = [0]
        stack = []
        lowlinks = {}
        index = {}
        result = []
        graph = self._succs

        def strongconnect(node):
            # set the depth index for this node to the smallest unused index
            index[node] = index_counter[0]
            lowlinks[node] = index_counter[0]
            index_counter[0] += 1
            stack.append(node)
            # Consider successors
            try:
                successors = graph[node]
            except Exception:
                successors = []
            for successor in successors:
                if successor not in lowlinks:
                    # Successor has not yet been visited
                    strongconnect(successor)
                    lowlinks[node] = min(lowlinks[node],lowlinks[successor])
                elif successor in stack:
                    # the successor is in the stack and hence in the current
                    # strongly connected component (SCC)
                    lowlinks[node] = min(lowlinks[node],index[successor])
            # If `node` is a root node, pop the stack and generate an SCC
            if lowlinks[node] == index[node]:
                connected_component = []
                while True:
                    successor = stack.pop()
                    connected_component.append(successor)
                    if successor == node: break
                component = tuple(connected_component)
                # storing the result
                result.append(component)

        for node in graph:
            if node not in lowlinks:
                strongconnect(node)
        return result

    @property
    def dot(self):
        # Render the graph in Graphviz "dot" format (edges, then isolated
        # nodes); node values are interpolated unquoted.
        result = ['digraph G {']
        for succ in self._preds:
            preds = self._preds[succ]
            for pred in preds:
                result.append('  %s -> %s;' % (pred, succ))
        for node in self._nodes:
            result.append('  %s;' % node)
        result.append('}')
        return '\n'.join(result)
#
# Unarchiving functionality for zip, tar, tgz, tbz, whl
#
ARCHIVE_EXTENSIONS = ('.tar.gz', '.tar.bz2', '.tar', '.zip',
'.tgz', '.tbz', '.whl')
def unarchive(archive_filename, dest_dir, format=None, check=True):
    """
    Unpack an archive (zip/whl/tar/tgz/tbz) into *dest_dir*.

    :param archive_filename: Path to the archive file.
    :param dest_dir: Directory to extract into.
    :param format: One of 'zip', 'tgz', 'tbz' or 'tar'; if None, it is
                   inferred from the filename extension.
    :param check: If true, reject member paths that would escape
                  *dest_dir* (path-traversal protection).
    :raises ValueError: for an unknown format or an escaping member path.
    """
    def check_path(path):
        # Ensure a member path, once resolved, stays inside dest_dir.
        if not isinstance(path, text_type):
            path = path.decode('utf-8')
        p = os.path.abspath(os.path.join(dest_dir, path))
        # p[plen:plen + 1] is '' when p == dest_dir exactly; the previous
        # p[plen] indexing raised IndexError in that case.
        if not p.startswith(dest_dir) or p[plen:plen + 1] not in ('', os.sep):
            raise ValueError('path outside destination: %r' % p)

    dest_dir = os.path.abspath(dest_dir)
    plen = len(dest_dir)
    archive = None
    if format is None:
        if archive_filename.endswith(('.zip', '.whl')):
            format = 'zip'
        elif archive_filename.endswith(('.tar.gz', '.tgz')):
            format = 'tgz'
        elif archive_filename.endswith(('.tar.bz2', '.tbz')):
            format = 'tbz'
        elif archive_filename.endswith('.tar'):
            format = 'tar'
        else:  # pragma: no cover
            raise ValueError('Unknown format for %r' % archive_filename)
    try:
        if format == 'zip':
            archive = ZipFile(archive_filename, 'r')
            if check:
                names = archive.namelist()
                for name in names:
                    check_path(name)
        else:
            # The tarfile mode is derived from the format here; previously
            # it was only set when the format was inferred from the
            # filename, so passing format='tar'/'tgz'/'tbz' explicitly
            # raised NameError on `mode`.
            try:
                mode = {'tgz': 'r:gz', 'tbz': 'r:bz2', 'tar': 'r'}[format]
            except KeyError:
                raise ValueError('Unknown format: %r' % format)
            archive = tarfile.open(archive_filename, mode)
            if check:
                names = archive.getnames()
                for name in names:
                    check_path(name)
        if format != 'zip' and sys.version_info[0] < 3:
            # See Python issue 17153. If the dest path contains Unicode,
            # tarfile extraction fails on Python 2.x if a member path name
            # contains non-ASCII characters - it leads to an implicit
            # bytes -> unicode conversion using ASCII to decode.
            for tarinfo in archive.getmembers():
                if not isinstance(tarinfo.name, text_type):
                    tarinfo.name = tarinfo.name.decode('utf-8')
        archive.extractall(dest_dir)
    finally:
        if archive:
            archive.close()
def zip_dir(directory):
    """zip a directory tree into a BytesIO object

    Archive member names are relative to *directory* (the leading path
    prefix is stripped off each file's full path).
    """
    buf = io.BytesIO()
    prefix_len = len(directory)
    with ZipFile(buf, "w") as zf:
        for root, dirs, files in os.walk(directory):
            rel = root[prefix_len:]
            for fname in files:
                zf.write(os.path.join(root, fname), os.path.join(rel, fname))
    return buf
#
# Simple progress bar
#
# Unit prefixes used by Progress.speed (decimal steps of 1000).
UNITS = ('', 'K', 'M', 'G','T','P')

class Progress(object):
    """
    Track progress of a task between a minimum and (possibly unknown)
    maximum value, providing percentage, ETA and speed strings.
    """
    # Sentinel reported by `maximum` when no maximum value is known.
    unknown = 'UNKNOWN'

    def __init__(self, minval=0, maxval=100):
        # maxval=None means the total amount of work is unknown.
        assert maxval is None or maxval >= minval
        self.min = self.cur = minval
        self.max = maxval
        self.started = None
        self.elapsed = 0
        self.done = False

    def update(self, curval):
        # Record the new current value; the first call starts the clock,
        # later calls update the elapsed time.
        assert self.min <= curval
        assert self.max is None or curval <= self.max
        self.cur = curval
        now = time.time()
        if self.started is None:
            self.started = now
        else:
            self.elapsed = now - self.started

    def increment(self, incr):
        # Advance progress by a non-negative amount.
        assert incr >= 0
        self.update(self.cur + incr)

    def start(self):
        # Reset to the minimum and return self for chaining.
        self.update(self.min)
        return self

    def stop(self):
        # Mark the task complete (jumping to max if one is known).
        if self.max is not None:
            self.update(self.max)
        self.done = True

    @property
    def maximum(self):
        return self.unknown if self.max is None else self.max

    @property
    def percentage(self):
        # NOTE(review): divides by (max - min); assumes max > min when a
        # maximum is set — confirm callers never pass maxval == minval.
        if self.done:
            result = '100 %'
        elif self.max is None:
            result = ' ?? %'
        else:
            v = 100.0 * (self.cur - self.min) / (self.max - self.min)
            result = '%3d %%' % v
        return result

    def format_duration(self, duration):
        # Python precedence makes this ((duration <= 0 and max is None)
        # or cur == min): unknown-total-with-sentinel or no progress yet.
        if (duration <= 0) and self.max is None or self.cur == self.min:
            result = '??:??:??'
        #elif duration < 1:
        #    result = '--:--:--'
        else:
            result = time.strftime('%H:%M:%S', time.gmtime(duration))
        return result

    @property
    def ETA(self):
        # Linear extrapolation: remaining = (total/done - 1) * elapsed.
        if self.done:
            prefix = 'Done'
            t = self.elapsed
            #import pdb; pdb.set_trace()
        else:
            prefix = 'ETA '
            if self.max is None:
                # -1 makes format_duration show the '??:??:??' sentinel.
                t = -1
            elif self.elapsed == 0 or (self.cur == self.min):
                t = 0
            else:
                #import pdb; pdb.set_trace()
                t = float(self.max - self.min)
                t /= self.cur - self.min
                t = (t - 1) * self.elapsed
        return '%s: %s' % (prefix, self.format_duration(t))

    @property
    def speed(self):
        # Progress per second, scaled into a human-readable unit; `unit`
        # deliberately leaks out of the for loop for the format string.
        if self.elapsed == 0:
            result = 0.0
        else:
            result = (self.cur - self.min) / self.elapsed
        for unit in UNITS:
            if result < 1000:
                break
            result /= 1000.0
        return '%d %sB/s' % (result, unit)
#
# Glob functionality
#
# Pattern for {a,b,c} choice groups, and sanity checks for misuse of '**'
# and unbalanced set markers.
RICH_GLOB = re.compile(r'\{([^}]*)\}')
_CHECK_RECURSIVE_GLOB = re.compile(r'[^/\\,{]\*\*|\*\*[^/\\,}]')
_CHECK_MISMATCH_SET = re.compile(r'^[^{]*\}|\{[^}]*$')

def iglob(path_glob):
    """Extended globbing function that supports ** and {opt1,opt2,opt3}."""
    for pattern, message in (
            (_CHECK_RECURSIVE_GLOB,
             """invalid glob %r: recursive glob "**" must be used alone"""),
            (_CHECK_MISMATCH_SET,
             """invalid glob %r: mismatching set marker '{' or '}'""")):
        if pattern.search(path_glob):
            raise ValueError(message % path_glob)
    return _iglob(path_glob)
def _iglob(path_glob):
    """
    Recursive worker for :func:`iglob`: expands the first ``{a,b,...}``
    choice group (if any), handles ``**`` by walking the tree, and
    delegates plain patterns to the standard library's glob.

    Fix: the locals previously named ``set`` and ``dir`` shadowed the
    builtins of the same name; renamed to ``choices`` and ``dirnames``.
    """
    rich_path_glob = RICH_GLOB.split(path_glob, 1)
    if len(rich_path_glob) > 1:
        # split(..., 1) on a match yields [prefix, group-contents, suffix].
        assert len(rich_path_glob) == 3, rich_path_glob
        prefix, choices, suffix = rich_path_glob
        for item in choices.split(','):
            for path in _iglob(''.join((prefix, item, suffix))):
                yield path
    else:
        if '**' not in path_glob:
            for item in std_iglob(path_glob):
                yield item
        else:
            prefix, radical = path_glob.split('**', 1)
            if prefix == '':
                prefix = '.'
            if radical == '':
                radical = '*'
            else:
                # we support both '/' and '\\' separators after '**'
                radical = radical.lstrip('/')
                radical = radical.lstrip('\\')
            for path, dirnames, files in os.walk(prefix):
                path = os.path.normpath(path)
                for fn in _iglob(os.path.join(path, radical)):
                    yield fn
if ssl:
from .compat import (HTTPSHandler as BaseHTTPSHandler, match_hostname,
CertificateError)
#
# HTTPSConnection which verifies certificates/matches domains
#
    class HTTPSConnection(httplib.HTTPSConnection):
        # HTTPS connection that can verify the server certificate against
        # a CA bundle and check the certificate matches the host.
        ca_certs = None # set this to the path to the certs file (.pem)
        check_domain = True # only used if ca_certs is not None

        # noinspection PyPropertyAccess
        def connect(self):
            # Establish the TCP connection, wrap it in TLS (using the
            # pre-SSLContext API on old 2.x, SSLContext otherwise), then
            # optionally verify the peer certificate's hostname.
            sock = socket.create_connection((self.host, self.port), self.timeout)
            if getattr(self, '_tunnel_host', False):
                # CONNECT-style proxying: tunnel before the TLS handshake.
                self.sock = sock
                self._tunnel()
            if not hasattr(ssl, 'SSLContext'):
                # For 2.x
                if self.ca_certs:
                    cert_reqs = ssl.CERT_REQUIRED
                else:
                    cert_reqs = ssl.CERT_NONE
                self.sock = ssl.wrap_socket(sock, self.key_file, self.cert_file,
                                            cert_reqs=cert_reqs,
                                            ssl_version=ssl.PROTOCOL_SSLv23,
                                            ca_certs=self.ca_certs)
            else: # pragma: no cover
                context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
                # SSLv2 is disabled explicitly; verification is only
                # enabled when a CA bundle was configured.
                context.options |= ssl.OP_NO_SSLv2
                if self.cert_file:
                    context.load_cert_chain(self.cert_file, self.key_file)
                kwargs = {}
                if self.ca_certs:
                    context.verify_mode = ssl.CERT_REQUIRED
                    context.load_verify_locations(cafile=self.ca_certs)
                if getattr(ssl, 'HAS_SNI', False):
                    kwargs['server_hostname'] = self.host
                self.sock = context.wrap_socket(sock, **kwargs)
            if self.ca_certs and self.check_domain:
                try:
                    match_hostname(self.sock.getpeercert(), self.host)
                    logger.debug('Host verified: %s', self.host)
                except CertificateError: # pragma: no cover
                    # Close the socket before propagating the mismatch.
                    self.sock.shutdown(socket.SHUT_RDWR)
                    self.sock.close()
                    raise
    class HTTPSHandler(BaseHTTPSHandler):
        # urllib handler that plugs the verifying HTTPSConnection above
        # into the opener machinery.
        def __init__(self, ca_certs, check_domain=True):
            BaseHTTPSHandler.__init__(self)
            self.ca_certs = ca_certs
            self.check_domain = check_domain

        def _conn_maker(self, *args, **kwargs):
            """
            This is called to create a connection instance. Normally you'd
            pass a connection class to do_open, but it doesn't actually check for
            a class, and just expects a callable. As long as we behave just as a
            constructor would have, we should be OK. If it ever changes so that
            we *must* pass a class, we'll create an UnsafeHTTPSConnection class
            which just sets check_domain to False in the class definition, and
            choose which one to pass to do_open.
            """
            result = HTTPSConnection(*args, **kwargs)
            if self.ca_certs:
                # Only configure verification when a CA bundle was given.
                result.ca_certs = self.ca_certs
                result.check_domain = self.check_domain
            return result

        def https_open(self, req):
            # Turn verification failures into a clearer CertificateError;
            # any other URLError propagates unchanged.
            try:
                return self.do_open(self._conn_maker, req)
            except URLError as e:
                if 'certificate verify failed' in str(e.reason):
                    raise CertificateError('Unable to verify server certificate '
                                           'for %s' % req.host)
                else:
                    raise
#
# To prevent against mixing HTTP traffic with HTTPS (examples: A Man-In-The-
# Middle proxy using HTTP listens on port 443, or an index mistakenly serves
# HTML containing a http://xyz link when it should be https://xyz),
# you can use the following handler class, which does not allow HTTP traffic.
#
# It works by inheriting from HTTPHandler - so build_opener won't add a
# handler for HTTP itself.
#
    class HTTPSOnlyHandler(HTTPSHandler, HTTPHandler):
        # Rejects any plain-HTTP request; inheriting from HTTPHandler
        # stops build_opener from installing a default HTTP handler.
        def http_open(self, req):
            raise URLError('Unexpected HTTP request on what should be a secure '
                           'connection: %s' % req)
#
# XML-RPC with timeouts
#
# (major, minor) of the running interpreter, used for 2.6 workarounds.
_ver_info = sys.version_info[:2]

if _ver_info == (2, 6):
    # Python 2.6's xmlrpclib needs old-style HTTP/HTTPS facade classes;
    # these variants forward extra keyword args (notably `timeout`) to
    # the underlying connection class.
    class HTTP(httplib.HTTP):
        def __init__(self, host='', port=None, **kwargs):
            if port == 0: # 0 means use port 0, not the default port
                port = None
            self._setup(self._connection_class(host, port, **kwargs))

    if ssl:
        class HTTPS(httplib.HTTPS):
            def __init__(self, host='', port=None, **kwargs):
                if port == 0: # 0 means use port 0, not the default port
                    port = None
                self._setup(self._connection_class(host, port, **kwargs))
class Transport(xmlrpclib.Transport):
    """
    xmlrpclib Transport that carries a per-instance connection timeout.
    """
    def __init__(self, timeout, use_datetime=0):
        self.timeout = timeout
        xmlrpclib.Transport.__init__(self, use_datetime)

    def make_connection(self, host):
        h, eh, x509 = self.get_host_info(host)
        if _ver_info == (2, 6):
            # 2.6 path: timeout-aware facade defined above.
            result = HTTP(h, timeout=self.timeout)
        else:
            # Reuse the cached connection while talking to the same host.
            # NOTE(review): self.timeout is not passed to HTTPConnection
            # on this path — confirm whether that is intentional.
            if not self._connection or host != self._connection[0]:
                self._extra_headers = eh
                self._connection = host, httplib.HTTPConnection(h)
            result = self._connection[1]
        return result
if ssl:
    class SafeTransport(xmlrpclib.SafeTransport):
        """
        xmlrpclib SafeTransport (HTTPS) with a per-instance timeout.
        """
        def __init__(self, timeout, use_datetime=0):
            self.timeout = timeout
            xmlrpclib.SafeTransport.__init__(self, use_datetime)

        def make_connection(self, host):
            # kwargs comes from get_host_info (x509 material, if any);
            # the timeout is injected alongside it.
            h, eh, kwargs = self.get_host_info(host)
            if not kwargs:
                kwargs = {}
            kwargs['timeout'] = self.timeout
            if _ver_info == (2, 6):
                result = HTTPS(host, None, **kwargs)
            else:
                # Reuse the cached connection for the same host.
                if not self._connection or host != self._connection[0]:
                    self._extra_headers = eh
                    self._connection = host, httplib.HTTPSConnection(h, None,
                                                                     **kwargs)
                result = self._connection[1]
            return result
class ServerProxy(xmlrpclib.ServerProxy):
    """
    ServerProxy accepting an extra 'timeout' keyword, wired through the
    Transport/SafeTransport subclasses defined above.
    """
    def __init__(self, uri, **kwargs):
        self.timeout = timeout = kwargs.pop('timeout', None)
        # The above classes only come into play if a timeout
        # is specified
        if timeout is not None:
            scheme, _ = splittype(uri)
            use_datetime = kwargs.get('use_datetime', 0)
            if scheme == 'https':
                tcls = SafeTransport
            else:
                tcls = Transport
            kwargs['transport'] = t = tcls(timeout, use_datetime=use_datetime)
            # Also keep a direct reference on the proxy for introspection.
            self.transport = t
        xmlrpclib.ServerProxy.__init__(self, uri, **kwargs)
#
# CSV functionality. This is provided because on 2.x, the csv module can't
# handle Unicode. However, we need to deal with Unicode in e.g. RECORD files.
#
def _csv_open(fn, mode, **kwargs):
if sys.version_info[0] < 3:
mode += 'b'
else:
kwargs['newline'] = ''
# Python 3 determines encoding from locale. Force 'utf-8'
# file encoding to match other forced utf-8 encoding
kwargs['encoding'] = 'utf-8'
return open(fn, mode, **kwargs)
class CSVBase(object):
    """
    Shared plumbing for the CSV reader/writer wrappers: the dialect
    options handed to the csv module, plus context-manager support that
    closes ``self.stream`` on exit.
    """
    # The strs are used because we need native str in the csv API
    # (2.x won't take Unicode).
    defaults = {
        'delimiter': str(','),
        'quotechar': str('"'),
        'lineterminator': str('\n')
    }

    def __enter__(self):
        return self

    def __exit__(self, *exc_info):
        self.stream.close()
class CSVReader(CSVBase):
    """
    Iterate CSV rows as lists of text, reading either from an open
    binary stream (``stream=...``) or from a file path (``path=...``).
    """
    def __init__(self, **kwargs):
        if 'stream' in kwargs:
            raw = kwargs['stream']
            if sys.version_info[0] >= 3:
                # needs to be a text stream
                raw = codecs.getreader('utf-8')(raw)
            self.stream = raw
        else:
            self.stream = _csv_open(kwargs['path'], 'r')
        self.reader = csv.reader(self.stream, **self.defaults)

    def __iter__(self):
        return self

    def next(self):
        """Return the next row, decoding byte cells to text on 2.x."""
        row = next(self.reader)
        if sys.version_info[0] < 3:
            row = [cell if isinstance(cell, text_type)
                   else cell.decode('utf-8') for cell in row]
        return row

    __next__ = next
class CSVWriter(CSVBase):
    """
    Write rows of text to *fn* as CSV, encoding cells to UTF-8 bytes on
    Python 2 where the csv module requires native strings.
    """
    def __init__(self, fn, **kwargs):
        self.stream = _csv_open(fn, 'w')
        self.writer = csv.writer(self.stream, **self.defaults)

    def writerow(self, row):
        if sys.version_info[0] < 3:
            row = [cell.encode('utf-8') if isinstance(cell, text_type)
                   else cell for cell in row]
        self.writer.writerow(row)
#
# Configurator functionality
#
class Configurator(BaseConfigurator):
    """
    Extends logging.config's BaseConfigurator with an 'inc://' value
    converter and dict-style access that instantiates '()' custom
    specifications on demand.
    """
    value_converters = dict(BaseConfigurator.value_converters)
    value_converters['inc'] = 'inc_convert'

    def __init__(self, config, base=None):
        super(Configurator, self).__init__(config)
        # Base directory used to resolve relative inc:// paths.
        self.base = base or os.getcwd()

    def configure_custom(self, config):
        # Instantiate the object described by `config`: '()' names the
        # callable, '[]' gives positional args, '.' gives attributes to
        # set on the resulting object; remaining keys become kwargs.
        def convert(o):
            # Recursively convert containers, instantiating nested '()'
            # specs; scalars go through BaseConfigurator.convert.
            if isinstance(o, (list, tuple)):
                result = type(o)([convert(i) for i in o])
            elif isinstance(o, dict):
                if '()' in o:
                    result = self.configure_custom(o)
                else:
                    result = {}
                    for k in o:
                        result[k] = convert(o[k])
            else:
                result = self.convert(o)
            return result

        c = config.pop('()')
        if not callable(c):
            c = self.resolve(c)
        props = config.pop('.', None)
        # Check for valid identifiers
        args = config.pop('[]', ())
        if args:
            args = tuple([convert(o) for o in args])
        items = [(k, convert(config[k])) for k in config if valid_ident(k)]
        kwargs = dict(items)
        result = c(*args, **kwargs)
        if props:
            for n, v in props.items():
                setattr(result, n, convert(v))
        return result

    def __getitem__(self, key):
        result = self.config[key]
        if isinstance(result, dict) and '()' in result:
            # Lazily instantiate custom specs and cache the result.
            self.config[key] = result = self.configure_custom(result)
        return result

    def inc_convert(self, value):
        """Default converter for the inc:// protocol."""
        if not os.path.isabs(value):
            value = os.path.join(self.base, value)
        with codecs.open(value, 'r', encoding='utf-8') as f:
            result = json.load(f)
        return result
class SubprocessMixin(object):
    """
    Mixin for running subprocesses and capturing their output
    """
    def __init__(self, verbose=False, progress=None):
        # progress: optional callable(line, context) fed each output line.
        self.verbose = verbose
        self.progress = progress

    def reader(self, stream, context):
        """
        Read lines from a subprocess' output stream and either pass to a progress
        callable (if specified) or write progress information to sys.stderr.
        """
        callback = self.progress
        chatty = self.verbose
        while True:
            line = stream.readline()
            if not line:
                break
            if callback is not None:
                callback(line, context)
            elif chatty:
                sys.stderr.write(line.decode('utf-8'))
                sys.stderr.flush()
            else:
                # Quiet mode: one dot per line of output.
                sys.stderr.write('.')
                sys.stderr.flush()
        stream.close()

    def run_command(self, cmd, **kwargs):
        """
        Run *cmd*, streaming its stdout and stderr through :meth:`reader`
        on two helper threads; returns the finished Popen instance.
        """
        proc = subprocess.Popen(cmd, stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE, **kwargs)
        workers = [
            threading.Thread(target=self.reader, args=(proc.stdout, 'stdout')),
            threading.Thread(target=self.reader, args=(proc.stderr, 'stderr')),
        ]
        for w in workers:
            w.start()
        proc.wait()
        for w in workers:
            w.join()
        if self.progress is not None:
            self.progress('done.', 'main')
        elif self.verbose:
            sys.stderr.write('done.\n')
        return proc
def normalize_name(name):
    """Normalize a python package name a la PEP 503"""
    # https://www.python.org/dev/peps/pep-0503/#normalized-names
    collapsed = re.sub('[-_.]+', '-', name)
    return collapsed.lower()
| gpl-3.0 |
microsoft/EconML | econml/drlearner.py | 1 | 1068 | # Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
import econml.dr as dr
from .utilities import deprecated
@deprecated("The econml.drlearner.DRLearner class has been moved to econml.dr.DRLearner; "
            "an upcoming release will remove support for the old name")
class DRLearner(dr.DRLearner):
    """Deprecated alias; use :class:`econml.dr.DRLearner` instead."""
    pass
@deprecated("The econml.drlearner.LinearDRLearner class has been moved to econml.dr.LinearDRLearner; "
            "an upcoming release will remove support for the old name")
class LinearDRLearner(dr.LinearDRLearner):
    """Deprecated alias; use :class:`econml.dr.LinearDRLearner` instead."""
    pass
@deprecated("The econml.drlearner.SparseLinearDRLearner class has been moved to econml.dr.SparseLinearDRLearner; "
            "an upcoming release will remove support for the old name")
class SparseLinearDRLearner(dr.SparseLinearDRLearner):
    """Deprecated alias; use :class:`econml.dr.SparseLinearDRLearner` instead."""
    pass
@deprecated("The econml.drlearner.ForestDRLearner class has been moved to econml.dr.ForestDRLearner; "
            "an upcoming release will remove support for the old name")
class ForestDRLearner(dr.ForestDRLearner):
    """Deprecated alias; use :class:`econml.dr.ForestDRLearner` instead."""
    pass
| mit |
andrewderekjackson/python_lcd_menu | lcd_menu/menu.py | 1 | 4202 | import os
class MenuItem(object):
    '''A single menu item which can contain child menu items'''

    def __init__(self, title, items=None, refresh_callback=None, refresh_callback_args=None):
        self._title = title
        self._items = items
        self._refresh_callback = refresh_callback
        self._refresh_callback_args = refresh_callback_args

    @property
    def title(self):
        '''Display text for this item.'''
        return self._title

    @property
    def items(self):
        '''Child menu items, or None for a leaf item.'''
        return self._items

    def refresh(self):
        '''Re-populate child items via the refresh callback, if one was given.'''
        callback = self._refresh_callback
        if callback is not None:
            self._items = callback(self, self._refresh_callback_args)
class Command(MenuItem):
    '''A single menu item which executes a callback when selected'''

    def __init__(self, title, command, arg=None):
        # A command is always a leaf: no child items.
        MenuItem.__init__(self, title, None)
        self._command = command
        self._arg = arg

    def invoke_command(self):
        '''Call the command callback; returns True if one was invoked.'''
        if self._command is None:
            return False
        self._command(self, self._arg)
        return True

    def refresh(self):
        '''Commands have no children to refresh.'''
        pass
class MenuView(object):
    '''Represents a current menu level and tracks the selected item'''

    def __init__(self, items):
        self._selected_index = 0
        self._items = items

    @property
    def selected_index(self):
        return self._selected_index

    @selected_index.setter
    def selected_index(self, val):
        # Clamp the requested index into [0, len(items) - 1].
        if val >= len(self._items):
            self._selected_index = len(self._items) - 1
        elif val > 0:
            self._selected_index = val
        else:
            self._selected_index = 0

    @property
    def items(self):
        return self._items

    def down(self):
        '''Move the selection one item down (clamped at the end).'''
        self.selected_index += 1

    def up(self):
        '''Move the selection one item up (clamped at the start).'''
        self.selected_index -= 1

    def refresh(self):
        '''Refresh the currently selected item.'''
        self.selected_item.refresh()

    @property
    def selected_item(self):
        return self._items[self._selected_index]
class Menu(object):
    '''Base menu controller responsible for managing the menu'''
    def __init__(self, items, update):
        # update: callable invoked with the current MenuView (or None when
        # closed) after every state change. NOTE(review): this instance
        # attribute shadows the `update` method defined at the bottom of
        # the class — confirm the method is intentionally dead code.
        self._history = []
        self.main_menu = MenuView(items)
        self.current_menu = self.main_menu
        self.update = update
        self.showing_menu = False
        # start with the menu closed
        self.close()
    def menu(self):
        """
        Shows the main menu

        NOTE(review): identical to show(); both are kept as public API.
        """
        self.current_menu = self.main_menu
        self.showing_menu = True
        self.update(self.current_menu)
    def up(self):
        """
        Navigates up in the menu
        """
        self.current_menu.up()
        self.update(self.current_menu)
    def down(self):
        """
        Navigates down in the menu
        """
        self.current_menu.down()
        self.update(self.current_menu)
    def select(self):
        """
        Selects the current menu. Either enters a submenu or invokes the command
        """
        # Commands are leaves: invoke and return without touching history.
        if isinstance(self.current_menu.selected_item, Command):
            self.current_menu.selected_item.invoke_command()
            return
        if isinstance(self.current_menu.selected_item, MenuItem):
            # Refresh first so dynamically populated children are current.
            self.current_menu.selected_item.refresh()
            if self.current_menu.selected_item.items is not None:
                # add current menu to history
                self._history.append(self.current_menu)
                self.current_menu = MenuView(self.current_menu.selected_item.items)
                self.update(self.current_menu)
    def back(self):
        """
        Returns back to a previous menu
        """
        if len(self._history) > 0:
            self.current_menu = self._history.pop()
            self.update(self.current_menu)
        else:
            # Backing out of the top level closes the menu entirely.
            self.close()
    def show(self):
        """
        Shows the main menu
        """
        self.current_menu = self.main_menu
        self.showing_menu = True
        self.update(self.current_menu)
    def close(self):
        """
        Closes the menu.
        """
        self.current_menu = None
        self.showing_menu = False
        self.update(self.current_menu)
        pass
    def update(self):
        pass
| mit |
smblance/ggplot | ggplot/tests/__init__.py | 8 | 10135 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
import matplotlib as mpl
import matplotlib.pyplot as plt
from nose.tools import with_setup, make_decorator, assert_true
import warnings
# Figure size captured at import time so teardown_package can restore it.
figsize_orig = mpl.rcParams["figure.figsize"]
def setup_package():
    # Force a fixed figure size so baseline-image comparisons are stable.
    mpl.rcParams["figure.figsize"] = (11.0, 8.0)
def teardown_package():
    # Restore whatever figure size was in effect before the test run.
    mpl.rcParams["figure.figsize"] = figsize_orig
import os
# Testing framework shamelessly stolen from matplotlib...
# Tests which should be run with 'python tests.py' or via 'must be
# included here.
# Modules run by the default test invocation (see test() below).
default_test_modules = [
    'ggplot.tests.test_basic',
    'ggplot.tests.test_readme_examples',
    'ggplot.tests.test_ggplot_internals',
    'ggplot.tests.test_geom',
    'ggplot.tests.test_stat',
    'ggplot.tests.test_stat_calculate_methods',
    'ggplot.tests.test_stat_summary',
    'ggplot.tests.test_geom_rect',
    'ggplot.tests.test_geom_dotplot',
    'ggplot.tests.test_geom_bar',
    'ggplot.tests.test_qplot',
    'ggplot.tests.test_geom_lines',
    'ggplot.tests.test_geom_linerange',
    'ggplot.tests.test_geom_pointrange',
    'ggplot.tests.test_faceting',
    'ggplot.tests.test_stat_function',
    'ggplot.tests.test_scale_facet_wrap',
    'ggplot.tests.test_scale_log',
    'ggplot.tests.test_reverse',
    'ggplot.tests.test_ggsave',
    'ggplot.tests.test_theme_mpl',
    'ggplot.tests.test_colors',
    'ggplot.tests.test_chart_components',
    'ggplot.tests.test_legend',
    'ggplot.tests.test_element_target',
    'ggplot.tests.test_element_text',
    'ggplot.tests.test_theme',
    'ggplot.tests.test_theme_bw',
    'ggplot.tests.test_theme_gray',
    'ggplot.tests.test_theme_mpl',
    'ggplot.tests.test_theme_seaborn'
    ]
# Tells nose's multiprocess plugin it may split these tests across workers.
_multiprocess_can_split_ = True
# Check that the test directories exist
if not os.path.exists(os.path.join(
        os.path.dirname(__file__), 'baseline_images')):
    raise IOError(
        'The baseline image directory does not exist. '
        'This is most likely because the test data is not installed. '
        'You may need to install ggplot from source to get the '
        'test data.')
def _assert_same_ggplot_image(gg, name, test_file, tol=17):
    """Asserts that the ggplot object produces the right image"""
    rendered = gg.draw()
    return _assert_same_figure_images(rendered, name, test_file, tol=tol)
class ImagesComparisonFailure(Exception):
    """Raised when a rendered image differs from its baseline beyond the RMS tolerance."""
    pass
def _assert_same_figure_images(fig, name, test_file, tol=17):
    """Asserts that the figure object produces the right image"""
    import os
    import shutil
    from matplotlib import cbook
    from matplotlib.testing.compare import compare_images
    # NOTE(review): assert_is_not_none is imported but never used here.
    from nose.tools import assert_is_not_none

    if not ".png" in name:
        name = name+".png"

    # Baselines live next to the test file under baseline_images/<module>/;
    # results go under result_images/<module>/.
    basedir = os.path.abspath(os.path.dirname(test_file))
    basename = os.path.basename(test_file)
    subdir = os.path.splitext(basename)[0]

    baseline_dir = os.path.join(basedir, 'baseline_images', subdir)
    result_dir = os.path.abspath(os.path.join('result_images', subdir))

    if not os.path.exists(result_dir):
        cbook.mkdirs(result_dir)

    orig_expected_fname = os.path.join(baseline_dir, name)
    actual_fname = os.path.join(result_dir, name)

    def make_test_fn(fname, purpose):
        # e.g. foo.png -> foo-expected.png
        base, ext = os.path.splitext(fname)
        return '%s-%s%s' % (base, purpose, ext)

    expected_fname = make_test_fn(actual_fname, 'expected')

    # Save the figure before testing whether the original image
    # actually exists. This make creating new tests much easier,
    # as the result image can afterwards just be copied.
    fig.savefig(actual_fname)
    if os.path.exists(orig_expected_fname):
        shutil.copyfile(orig_expected_fname, expected_fname)
    else:
        raise Exception("Baseline image %s is missing" % orig_expected_fname)
    err = compare_images(expected_fname, actual_fname,
                         tol, in_decorator=True)
    if err:
        msg = 'images not close: {actual:s} vs. {expected:s} (RMS {rms:.2f})'.format(**err)
        raise ImagesComparisonFailure(msg)
    return err
def get_assert_same_ggplot(test_file):
    """Returns a "assert_same_ggplot" function for these test file

    call it like `assert_same_ggplot = get_assert_same_ggplot(__file__)`
    """
    def bound(*args, **kwargs):
        # Force the captured test_file, overriding any caller-supplied one.
        kwargs["test_file"] = test_file
        return _assert_same_ggplot_image(*args, **kwargs)
    bound.__doc__ = _assert_same_ggplot_image.__doc__
    return bound
def assert_same_elements(first, second, msg=None):
    """
    Assert that *first* and *second* have the same length and pairwise
    equal elements.

    :param msg: Optional custom failure message. Fix: this parameter was
                previously accepted but silently ignored.
    """
    assert_true(len(first) == len(second), msg or "different length")
    assert_true(all(a == b for a, b in zip(first, second)),
                msg or "Unequal: %s vs %s" % (first, second))
def image_comparison(baseline_images=None, tol=17, extensions=None):
    """
    call signature::
      image_comparison(baseline_images=['my_figure'], tol=17)
    Compare images generated by the test with those specified in
    *baseline_images*, which must correspond else an
    ImagesComparisonFailure exception will be raised.
    Keyword arguments:
      *baseline_images*: list
        A list of strings specifying the names of the images generated
        by calls to :meth:`matplotlib.figure.savefig`.
      *tol*: (default 13)
        The RMS threshold above which the test is considered failed.
    """
    if baseline_images is None:
        raise ValueError('baseline_images must be specified')
    if extensions:
        # ignored, only for compatibility with matplotlibs decorator!
        pass

    def compare_images_decorator(func):
        import inspect
        # Resolve the test's source file so baselines are looked up
        # relative to it.
        _file = inspect.getfile(func)
        def decorated():
            # make sure we don't carry over bad images from former tests.
            assert len(plt.get_fignums()) == 0, "no of open figs: %s -> find the last test with ' " \
                                                "python tests.py -v' and add a '@cleanup' decorator." % \
                                                str(plt.get_fignums())
            func()
            assert len(plt.get_fignums()) == len(baseline_images), "different number of " \
                                                                    "baseline_images and actuall " \
                                                                    "plots."
            # Pair each open figure with its baseline, in creation order.
            for fignum, baseline in zip(plt.get_fignums(), baseline_images):
                figure = plt.figure(fignum)
                _assert_same_figure_images(figure, baseline, _file, tol=tol)
        # also use the cleanup decorator to close any open figures!
        return make_decorator(cleanup(func))(decorated)
    return compare_images_decorator
def cleanup(func):
    """Decorator to add cleanup to the testing function
      @cleanup
      def test_something():
          " ... "
    Note that `@cleanup` is useful *only* for test functions, not for test
    methods or inside of TestCase subclasses.
    """
    def _teardown():
        # Close every figure and undo warning filters set by the test.
        plt.close('all')
        warnings.resetwarnings() #reset any warning filters set in tests
    # _setup (module-level) configures the backend/rcParams before the test.
    return with_setup(setup=_setup, teardown=_teardown)(func)
# This is called from the cleanup decorator
def _setup():
    """Per-test setup used by the @cleanup decorator: pin locale, backend
    and rcParams so image comparisons are reproducible."""
    # The baseline images are created in this locale, so we should use
    # it during all of the tests.
    import locale
    import warnings
    from matplotlib.backends import backend_agg, backend_pdf, backend_svg

    try:
        locale.setlocale(locale.LC_ALL, str('en_US.UTF-8'))
    except locale.Error:
        try:
            locale.setlocale(locale.LC_ALL, str('English_United States.1252'))
        except locale.Error:
            warnings.warn(
                "Could not set locale to English/United States. "
                "Some date-related tests may fail")

    mpl.use('Agg', warn=False)  # use Agg backend for these tests
    if mpl.get_backend().lower() != "agg" and mpl.get_backend().lower() != "qt4agg":
        raise Exception(("Using a wrong matplotlib backend ({0}), which will not produce proper "
                         "images").format(mpl.get_backend()))

    # These settings *must* be hardcoded for running the comparison
    # tests
    mpl.rcdefaults()  # Start with all defaults
    mpl.rcParams['text.hinting'] = True
    mpl.rcParams['text.antialiased'] = True
    #mpl.rcParams['text.hinting_factor'] = 8

    # Clear the font caches. Otherwise, the hinting mode can travel
    # from one test to another.
    backend_agg.RendererAgg._fontd.clear()
    backend_pdf.RendererPdf.truetype_font_cache.clear()
    backend_svg.RendererSVG.fontd.clear()

    # make sure we don't carry over bad plots from former tests
    assert len(plt.get_fignums()) == 0, "no of open figs: %s -> find the last test with ' " \
                                        "python tests.py -v' and add a '@cleanup' decorator." % \
                                        str(plt.get_fignums())
# This is here to run it like "from ggplot.tests import test; test()"
# This is here to run it like "from ggplot.tests import test; test()"
def test(verbosity=1):
    """run the ggplot test suite"""
    old_backend = mpl.rcParams['backend']
    try:
        mpl.use('agg')
        import nose
        import nose.plugins.builtin
        from matplotlib.testing.noseclasses import KnownFailure
        from nose.plugins.manager import PluginManager
        from nose.plugins import multiprocess

        # store the old values before overriding
        plugins = []
        plugins.append( KnownFailure() )
        plugins.extend( [plugin() for plugin in nose.plugins.builtin.plugins] )

        manager = PluginManager(plugins=plugins)
        config = nose.config.Config(verbosity=verbosity, plugins=manager)

        # Nose doesn't automatically instantiate all of the plugins in the
        # child processes, so we have to provide the multiprocess plugin with
        # a list.
        multiprocess._instantiate_plugins = [KnownFailure]
        success = nose.run( defaultTest=default_test_modules,
                            config=config,
                            )
    finally:
        # Restore the caller's backend even if the run raised.
        if old_backend.lower() != 'agg':
            mpl.use(old_backend)

    return success

test.__test__ = False  # nose: this function is not a test
| bsd-2-clause |
xiangel/hue | apps/beeswax/src/beeswax/migrations/0009_auto__chg_field_queryhistory_server_port.py | 35 | 9498 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    def forwards(self, orm):
        # South schema migration: widen QueryHistory.server_port from a
        # small integer to a positive integer column.

        # Changing field 'QueryHistory.server_port'
        db.alter_column('beeswax_queryhistory', 'server_port', self.gf('django.db.models.fields.PositiveIntegerField')())
def backwards(self, orm):
# Changing field 'QueryHistory.server_port'
db.alter_column('beeswax_queryhistory', 'server_port', self.gf('django.db.models.fields.SmallIntegerField')())
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'beeswax.metainstall': {
'Meta': {'object_name': 'MetaInstall'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'installed_example': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'beeswax.queryhistory': {
'Meta': {'ordering': "['-submission_date']", 'object_name': 'QueryHistory'},
'design': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['beeswax.SavedQuery']", 'null': 'True'}),
'has_results': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_state': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'}),
'log_context': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'null': 'True'}),
'modified_row_count': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'notify': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'operation_type': ('django.db.models.fields.SmallIntegerField', [], {'null': 'True'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'query': ('django.db.models.fields.TextField', [], {}),
'query_type': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'server_guid': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '1024', 'null': 'True'}),
'server_host': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '128'}),
'server_id': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'null': 'True'}),
'server_name': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '128'}),
'server_port': ('django.db.models.fields.PositiveIntegerField', [], {'default': '10000'}),
'server_type': ('django.db.models.fields.CharField', [], {'default': "'beeswax'", 'max_length': '128'}),
'statement_number': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'submission_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'})
},
'beeswax.savedquery': {
'Meta': {'ordering': "['-mtime']", 'object_name': 'SavedQuery'},
'data': ('django.db.models.fields.TextField', [], {'max_length': '65536'}),
'desc': ('django.db.models.fields.TextField', [], {'max_length': '1024'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_auto': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'is_trashed': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'mtime': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'type': ('django.db.models.fields.IntegerField', [], {})
},
'beeswax.session': {
'Meta': {'object_name': 'Session'},
'application': ('django.db.models.fields.CharField', [], {'default': "'beeswax'", 'max_length': '128'}),
'guid': ('django.db.models.fields.TextField', [], {'max_length': "'100'"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_used': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'db_index': 'True', 'blank': 'True'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'secret': ('django.db.models.fields.TextField', [], {'max_length': "'100'"}),
'server_protocol_version': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'status_code': ('django.db.models.fields.PositiveSmallIntegerField', [], {})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'desktop.document': {
'Meta': {'object_name': 'Document'},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'description': ('django.db.models.fields.TextField', [], {'default': "''"}),
'extra': ('django.db.models.fields.TextField', [], {'default': "''"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'db_index': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'doc_owner'", 'to': "orm['auth.User']"}),
'tags': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['desktop.DocumentTag']", 'db_index': 'True', 'symmetrical': 'False'}),
'version': ('django.db.models.fields.SmallIntegerField', [], {'default': '1'})
},
'desktop.documenttag': {
'Meta': {'object_name': 'DocumentTag'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'tag': ('django.db.models.fields.SlugField', [], {'max_length': '50'})
}
}
complete_apps = ['beeswax'] | apache-2.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.