repo_name stringlengths 7 65 | path stringlengths 5 185 | copies stringlengths 1 4 | size stringlengths 4 6 | content stringlengths 977 990k | license stringclasses 14 values | hash stringlengths 32 32 | line_mean float64 7.18 99.4 | line_max int64 31 999 | alpha_frac float64 0.25 0.95 | ratio float64 1.5 7.84 | autogenerated bool 1 class | config_or_test bool 2 classes | has_no_keywords bool 2 classes | has_few_assignments bool 1 class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
clips/pattern | examples/08-server/02-api/api.py | 1 | 3541 | from __future__ import print_function
from __future__ import unicode_literals
from builtins import str, bytes, dict, int
import os
import sys
sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..", ".."))
from pattern.server import App
from pattern.server import MINUTE, HOUR, DAY
from pattern.text import language
app = App("api")
# The language() function in pattern.text guesses the language of a given string.
# For example: language("In French, goodbye is au revoir.") returns ("en", 0.83).
# It can handle "en", "es", "de", "fr", "nl", "it" with reasonable accuracy.
# To create a web service like Google Translate with pattern.server is easy.
# Normally, URL handlers return a string with the contents of that web page.
# If we return a dictionary instead, it will be formatted as a JSON-string,
# the data interchange format used by many popular web services.
# So clients (e.g., a user's Python script) can query the web service URL
# and catch the JSON reply.
# There is only one tricky part: rate limiting.
# Note the "limit", "time" and "key" parameters in @app.route() below.
# We'll explain them in more detail.
# First, run the script and visit:
# http://127.0.0.1:8080/language?q=in+french+goodbye+is+au+revoir
# You should see some JSON-output:
# {"language": "en", "confidence": 0.83}
@app.route("/language", limit=100, time=HOUR)
def predict_language(q=""):
#print(q)
iso, confidence = language(q) # (takes some time to load the first time)
return {
"language": iso,
"confidence": round(confidence, 2)
}
# When you set up a web service, expect high traffic peaks.
# For example, a user may have 10,000 sentences
# and send them all at once in a for-loop to our web service:
# import urllib
# import json
# for s in sentences[:10000]:
# url = "http://127.0.0.1:8080/language?q=" + s.replace(" ", "-")
# data = urllib.urlopen(url).read()
# data = json.loads(data)
# Rate limiting caps the number of allowed requests for a user.
# In this example, limit=100 and time=HOUR means up to a 100 requests/hour.
# After that, the user will get a HTTP 429 Too Many Requests error.
# The example below demonstrates how rates can be set up per user.
# In this case, only the user with key=1234 is allowed access.
# All other requests will generate a HTTP 403 Forbidden error.
# A user can pass his personal key as a query parameter, e.g.,
# http://127.0.0.1:8080/language/paid?q=hello&key=1234
# Check personal keys instead of IP-address:
@app.route("/language/paid", limit=True, key=lambda data: data.get("key"))
def predict_language_paid(q="", key=None):
return {"language": language(q)[0]}
# Create an account for user with key=1234 (do once).
# You can generate fairly safe keys with app.rate.key().
if not app.rate.get(key="1234", path="/language/paid"):
app.rate.set(key="1234", path="/language/paid", limit=10000, time=DAY)
# Try it out with the key and without the key:
# http://127.0.0.1:8080/language/paid?q=hello&key=1234
# http://127.0.0.1:8080/language/paid?q=hello (403 error)
# A rate.db SQLite database was created in the current folder.
# If you want to give it another name, use App(rate="xxx.db").
# To view the contents of the database, we use the free
# SQLite Database Browser (http://sqlitebrowser.sourceforge.net).
# If the web service is heavily used,
# we may want to use more threads for concurrent requests
# (default is 30 threads with max 20 queueing):
app.run("127.0.0.1", port=8080, threads=100, queue=50)
| bsd-3-clause | 664210a783c1158be9b17fd2bb73f660 | 36.273684 | 81 | 0.704038 | 3.324883 | false | false | false | false |
daxm/fmcapi | fmcapi/api_objects/object_services/securityzones.py | 1 | 1284 | """Security Zones Class."""
from fmcapi.api_objects.apiclasstemplate import APIClassTemplate
import logging
class SecurityZones(APIClassTemplate):
    """Represents a Security Zone object on the FMC."""

    # JSON fields the FMC accepts/returns for this object type.
    VALID_JSON_DATA = [
        "id",
        "name",
        "type",
        "description",
        "interfaceMode",
        "interfaces",
    ]
    # Keyword arguments accepted at instantiation time.
    VALID_FOR_KWARGS = VALID_JSON_DATA + []
    URL_SUFFIX = "/object/securityzones"
    REQUIRED_FOR_POST = ["name", "interfaceMode"]
    FILTER_BY_NAME = True

    def __init__(self, fmc, **kwargs):
        """
        Initialize SecurityZones object.

        :param fmc: (object) FMC object
        :param kwargs: Any other values passed during instantiation.
        :return: None
        """
        super().__init__(fmc, **kwargs)
        logging.debug("In __init__() for SecurityZones class.")
        self.parse_kwargs(**kwargs)

    def parse_kwargs(self, **kwargs):
        """
        Parse the kwargs and set self variables to match.

        :return: None
        """
        super().parse_kwargs(**kwargs)
        logging.debug("In parse_kwargs() for SecurityZones class.")
        # Default to routed mode when the caller did not specify one.
        self.interfaceMode = kwargs.get("interfaceMode", "ROUTED")
| bsd-3-clause | d4e78995adccc4354aea4534759ffd76 | 26.913043 | 68 | 0.584891 | 4.102236 | false | false | false | false |
daxm/fmcapi | fmcapi/api_objects/update_packages/upgradepackages.py | 1 | 1268 | """Upgrade Packages Class."""
from fmcapi.api_objects.apiclasstemplate import APIClassTemplate
import logging
class UpgradePackages(APIClassTemplate):
    """Represents an Upgrade Package known to the FMC."""

    VALID_JSON_DATA = ["id", "name", "type"]
    VALID_FOR_KWARGS = VALID_JSON_DATA + []
    URL_SUFFIX = "/updates/upgradepackages"

    def __init__(self, fmc, **kwargs):
        """
        Initialize UpgradePackages object.

        Set self.type to "UpgradePackage", parse the kwargs, and set up the
        self.URL.

        :param fmc (object): FMC object
        :param **kwargs: Any other values passed during instantiation.
        :return: None
        """
        super().__init__(fmc, **kwargs)
        logging.debug("In __init__() for UpgradePackages class.")
        self.type = "UpgradePackage"
        # Upgrade packages live under the platform API, not the config API.
        self.URL = f"{self.fmc.platform_url}{self.URL_SUFFIX}"
        self.parse_kwargs(**kwargs)

    def post(self):
        """POST is not supported for UpgradePackages; log and return None."""
        logging.info("POST method for API for UpgradePackages not supported.")

    def put(self):
        """PUT is not supported for UpgradePackages; log and return None."""
        logging.info("PUT method for API for UpgradePackages not supported.")
| bsd-3-clause | 6eccf3d38db9a562b92bba6fcefacae9 | 32.368421 | 85 | 0.636435 | 4.090323 | false | false | false | false |
from setuptools import setup, find_packages

__author__ = "Dax Mickelson"
# Fixed: was "__author_email" (missing trailing underscores), which breaks
# the conventional dunder-metadata naming used by __author__/__license__.
__author_email__ = "dmickels@cisco.com"
__license__ = "BSD"

setup(
    name="fmcapi",
    version="20210802.0",
    description="Easier interface to Cisco's FMC API than writing your own way.",
    long_description="""With the removal to configure a Cisco NGFW via the command line your only option is to
    do so via a manager. Some things are better when automated so using the manager's API gives us that power.
    However, the current way to write external scripts and interact with the FMC's API isn't that great. We created
    this "wrapper" to give you an easier to use way of communicating with the FMC's API than using the example code
    provided in the API Explorer.""",
    url="https://github.com/daxm/fmcapi",
    author="Dax Mickelson",
    author_email="dmickels@cisco.com",
    license="BSD",
    classifiers=[
        "Development Status :: 5 - Production/Stable",
        "Environment :: Plugins",
        "Intended Audience :: System Administrators",
        "Intended Audience :: Developers",
        "Intended Audience :: Other Audience",
        "License :: OSI Approved :: BSD License",
        "Natural Language :: English",
        "Operating System :: POSIX :: Linux",
        "Operating System :: MacOS",
        "Operating System :: Microsoft",
        "Programming Language :: Python :: 3",
        "Topic :: Security",
        "Topic :: Software Development",
        "Topic :: Software Development :: Libraries :: Python Modules",
        "Topic :: System :: Networking :: Firewalls",
        "Topic :: System :: Systems Administration",
        "Topic :: Utilities",
    ],
    keywords="fmcapi fmc ftd security cisco ngfw api firepower",
    packages=find_packages(exclude=["docs", "tests*"]),
    # "datetime" and "ipaddress" are in the standard library on every
    # supported interpreter (python_requires >= 3.6); listing them pulls in
    # unrelated same-named PyPI packages (the "ipaddress" backport can even
    # shadow the stdlib module).  Only "requests" is a real dependency.
    install_requires=["requests"],
    python_requires=">=3.6",
    package_data={},
    data_files=None,
)
| bsd-3-clause | e68b0f1041b7cfa7ca890ca78d573083 | 42.266667 | 118 | 0.646636 | 4.142553 | false | false | true | false |
conda/conda-concourse-ci | conda_concourse_ci/concourse.py | 1 | 4478 | import json
import logging
import subprocess
from contextlib import AbstractContextManager
class Concourse(AbstractContextManager):
    """
    Client for a Concourse CI instance, driven via the ``fly`` CLI.

    A compatible version of ``fly`` must be installed and on PATH.

    May be used as a context manager that logs in on entry and logs out
    on exit:

        with Concourse(url, username, password) as con:
            for pipeline in con.pipelines:
                print(pipeline)

    Parameters
    ----------
    concourse_url : str
        The URL of the Concourse CI server
    username : str, optional
        Concourse username.
    password : str, optional
        Password for user.
    team_name : str, optional
        Team to autheticate with.
    target : str, optional
        Concourse target name
    """

    def __init__(
        self,
        concourse_url,
        username=None,
        password=None,
        team_name=None,
        target='conda-concourse-server'
    ):
        self.concourse_url = concourse_url
        self.username = username
        self.password = password
        self.team_name = team_name
        self.target = target

    def __enter__(self):
        # Context-manager entry: authenticate and hand back the client.
        self.login()
        return self

    def __exit__(self, *exc_details):
        # Always log out, even when the body raised.
        self.logout()

    def _fly(self, fly_args, check=True):
        """Run one fly command against the stored target; return the result."""
        cmd = ['fly', '-t', self.target] + fly_args
        logging.debug('command: ' + ' '.join(cmd))
        proc = subprocess.run(cmd, capture_output=True)
        logging.debug('returncode: ' + str(proc.returncode))
        logging.debug('stdout: ' + proc.stdout.decode('utf-8'))
        logging.debug('stderr: ' + proc.stderr.decode('utf-8'))
        if check:
            proc.check_returncode()
        return proc

    def _flyj(self, fly_args, check=True):
        """Run a fly command with --json appended and deserialize stdout."""
        proc = self._fly(fly_args=fly_args + ['--json'], check=check)
        return json.loads(proc.stdout)

    def login(self):
        """Authenticate against the server with any credentials provided."""
        args = ['login', '--concourse-url', self.concourse_url]
        # Optional flags are added only when a value was supplied.
        for flag, value in (
            ('--team-name', self.team_name),
            ('--username', self.username),
            ('--password', self.password),
        ):
            if value is not None:
                args.extend([flag, value])
        self._fly(args)

    def logout(self):
        self._fly(["logout"])

    def sync(self):
        self._fly(['sync'])

    def set_pipeline(self, pipeline, config_file, vars_path):
        self._fly([
            "set-pipeline",
            "--non-interactive",
            "--pipeline", pipeline,
            "--config", config_file,
            "--load-vars-from", vars_path,
        ])

    def expose_pipeline(self, pipeline):
        self._fly(['expose-pipeline', '--pipeline', pipeline])

    def destroy_pipeline(self, pipeline):
        self._fly(
            ['destroy-pipeline', '--non-interactive', '--pipeline', pipeline])

    def pause_pipeline(self, pipeline):
        self._fly(['pause-pipeline', '--pipeline', pipeline])

    def unpause_pipeline(self, pipeline):
        self._fly(['unpause-pipeline', '--pipeline', pipeline])

    @property
    def pipelines(self):
        """A list of pipelines names"""
        return [entry['name'] for entry in self._flyj(['pipelines'])]

    def get_jobs(self, pipeline):
        return self._flyj(['jobs', '-p', pipeline])

    def get_builds(self, pipeline):
        return self._flyj(['builds', '--pipeline', pipeline])

    def status_of_jobs(self, pipeline):
        """Map job name -> status of its last finished build ('n/a' if none)."""
        statuses = {}
        for job in self.get_jobs(pipeline):
            build = job.get('finished_build', None)
            status = build.get('status', 'n/a') if build else 'n/a'
            statuses[job.get('name', 'unknown')] = status
        return statuses

    def trigger_job(self, pipeline, job):
        self._fly(['trigger-job', '--job', f'{pipeline}/{job}'])

    def abort_build(self, pipeline, job, name):
        self._fly(
            ['abort-build', '--job', f'{pipeline}/{job}', '--build', name])
| bsd-3-clause | f93ff6f4e5d474e5ad2c8c8c8151dd4c | 28.460526 | 76 | 0.553149 | 4.212606 | false | false | false | false |
adblockplus/gyp | pylib/gyp/generator/xcode.py | 16 | 58863 | # Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import filecmp
import gyp.common
import gyp.xcodeproj_file
import gyp.xcode_ninja
import errno
import os
import sys
import posixpath
import re
import shutil
import subprocess
import tempfile
# Project files generated by this module will use _intermediate_var as a
# custom Xcode setting whose value is a DerivedSources-like directory that's
# project-specific and configuration-specific. The normal choice,
# DERIVED_FILE_DIR, is target-specific, which is thought to be too restrictive
# as it is likely that multiple targets within a single project file will want
# to access the same set of generated files. The other option,
# PROJECT_DERIVED_FILE_DIR, is unsuitable because while it is project-specific,
# it is not configuration-specific. INTERMEDIATE_DIR is defined as
# $(PROJECT_DERIVED_FILE_DIR)/$(CONFIGURATION).
_intermediate_var = 'INTERMEDIATE_DIR'
# SHARED_INTERMEDIATE_DIR is the same, except that it is shared among all
# targets that share the same BUILT_PRODUCTS_DIR.
_shared_intermediate_var = 'SHARED_INTERMEDIATE_DIR'
_library_search_paths_var = 'LIBRARY_SEARCH_PATHS'
generator_default_variables = {
'EXECUTABLE_PREFIX': '',
'EXECUTABLE_SUFFIX': '',
'STATIC_LIB_PREFIX': 'lib',
'SHARED_LIB_PREFIX': 'lib',
'STATIC_LIB_SUFFIX': '.a',
'SHARED_LIB_SUFFIX': '.dylib',
# INTERMEDIATE_DIR is a place for targets to build up intermediate products.
# It is specific to each build environment. It is only guaranteed to exist
# and be constant within the context of a project, corresponding to a single
# input file. Some build environments may allow their intermediate directory
# to be shared on a wider scale, but this is not guaranteed.
'INTERMEDIATE_DIR': '$(%s)' % _intermediate_var,
'OS': 'mac',
'PRODUCT_DIR': '$(BUILT_PRODUCTS_DIR)',
'LIB_DIR': '$(BUILT_PRODUCTS_DIR)',
'RULE_INPUT_ROOT': '$(INPUT_FILE_BASE)',
'RULE_INPUT_EXT': '$(INPUT_FILE_SUFFIX)',
'RULE_INPUT_NAME': '$(INPUT_FILE_NAME)',
'RULE_INPUT_PATH': '$(INPUT_FILE_PATH)',
'RULE_INPUT_DIRNAME': '$(INPUT_FILE_DIRNAME)',
'SHARED_INTERMEDIATE_DIR': '$(%s)' % _shared_intermediate_var,
'CONFIGURATION_NAME': '$(CONFIGURATION)',
}
# The Xcode-specific sections that hold paths.
generator_additional_path_sections = [
'mac_bundle_resources',
'mac_framework_headers',
'mac_framework_private_headers',
# 'mac_framework_dirs', input already handles _dirs endings.
]
# The Xcode-specific keys that exist on targets and aren't moved down to
# configurations.
generator_additional_non_configuration_keys = [
'ios_app_extension',
'ios_watch_app',
'ios_watchkit_extension',
'mac_bundle',
'mac_bundle_resources',
'mac_framework_headers',
'mac_framework_private_headers',
'mac_xctest_bundle',
'mac_xcuitest_bundle',
'xcode_create_dependents_test_runner',
]
# We want to let any rules apply to files that are resources also.
generator_extra_sources_for_rules = [
'mac_bundle_resources',
'mac_framework_headers',
'mac_framework_private_headers',
]
generator_filelist_paths = None
# Xcode's standard set of library directories, which don't need to be duplicated
# in LIBRARY_SEARCH_PATHS. This list is not exhaustive, but that's okay.
xcode_standard_library_dirs = frozenset([
'$(SDKROOT)/usr/lib',
'$(SDKROOT)/usr/local/lib',
])
def CreateXCConfigurationList(configuration_names):
  """Builds an XCConfigurationList with one build configuration per name.

  Falls back to a single 'Default' configuration when no names are given.
  The first name becomes the defaultConfigurationName.
  """
  names = configuration_names if len(configuration_names) else ['Default']
  xccl = gyp.xcodeproj_file.XCConfigurationList({'buildConfigurations': []})
  for name in names:
    xccl.AppendProperty(
        'buildConfigurations',
        gyp.xcodeproj_file.XCBuildConfiguration({'name': name}))
  xccl.SetProperty('defaultConfigurationName', names[0])
  return xccl
class XcodeProject(object):
  """Wraps one generated .xcodeproj bundle built from a single .gyp file.

  NOTE(review): this file is Python 2 code (``except OSError, e`` syntax,
  old-style octal literals); keep it on a Python 2 interpreter.
  """

  def __init__(self, gyp_path, path, build_file_dict):
    """Creates the PBXProject for `path` and ensures the bundle dir exists.

    gyp_path: path to the .gyp input file this project is generated from.
    path: path of the .xcodeproj directory to create/write into.
    build_file_dict: the parsed gyp build-file dictionary.
    """
    self.gyp_path = gyp_path
    self.path = path
    self.project = gyp.xcodeproj_file.PBXProject(path=path)
    # projectDirPath points from the .xcodeproj back to the .gyp file's
    # directory so that relative source paths resolve correctly.
    projectDirPath = gyp.common.RelativePath(
        os.path.dirname(os.path.abspath(self.gyp_path)),
        os.path.dirname(path) or '.')
    self.project.SetProperty('projectDirPath', projectDirPath)
    self.project_file = \
        gyp.xcodeproj_file.XCProjectFile({'rootObject': self.project})
    self.build_file_dict = build_file_dict

    # TODO(mark): add destructor that cleans up self.path if created_dir is
    # True and things didn't complete successfully.  Or do something even
    # better with "try"?
    self.created_dir = False
    try:
      os.makedirs(self.path)
      self.created_dir = True
    except OSError, e:
      # EEXIST just means the bundle directory was already there.
      if e.errno != errno.EEXIST:
        raise
  def Finalize1(self, xcode_targets, serialize_all_tests):
    """First finalization pass: configurations, target order, All targets.

    xcode_targets: dict mapping qualified gyp targets to their Xcode targets.
    serialize_all_tests: when true, wraps each 'run_as' test command in a
    flock-based lock so only one test runs at a time.
    """
    # Collect a list of all of the build configuration names used by the
    # various targets in the file.  It is very heavily advised to keep each
    # target in an entire project (even across multiple project files) using
    # the same set of configuration names.
    configurations = []
    for xct in self.project.GetProperty('targets'):
      xccl = xct.GetProperty('buildConfigurationList')
      xcbcs = xccl.GetProperty('buildConfigurations')
      for xcbc in xcbcs:
        name = xcbc.GetProperty('name')
        if name not in configurations:
          configurations.append(name)

    # Replace the XCConfigurationList attached to the PBXProject object with
    # a new one specifying all of the configuration names used by the various
    # targets.
    try:
      xccl = CreateXCConfigurationList(configurations)
      self.project.SetProperty('buildConfigurationList', xccl)
    except:
      sys.stderr.write("Problem with gyp file %s\n" % self.gyp_path)
      raise

    # The need for this setting is explained above where _intermediate_var is
    # defined.  The comments below about wanting to avoid project-wide build
    # settings apply here too, but this needs to be set on a project-wide
    # basis so that files relative to the _intermediate_var setting can be
    # displayed properly in the Xcode UI.
    #
    # Note that for configuration-relative files such as anything relative to
    # _intermediate_var, for the purposes of UI tree view display, Xcode will
    # only resolve the configuration name once, when the project file is
    # opened.  If the active build configuration is changed, the project file
    # must be closed and reopened if it is desired for the tree view to
    # update.  This is filed as Apple radar 6588391.
    xccl.SetBuildSetting(_intermediate_var,
                         '$(PROJECT_DERIVED_FILE_DIR)/$(CONFIGURATION)')
    xccl.SetBuildSetting(_shared_intermediate_var,
                         '$(SYMROOT)/DerivedSources/$(CONFIGURATION)')

    # Set user-specified project-wide build settings and config files.  This
    # is intended to be used very sparingly.  Really, almost everything
    # should go into target-specific build settings sections.  The
    # project-wide settings are only intended to be used in cases where Xcode
    # attempts to resolve variable references in a project context as opposed
    # to a target context, such as when resolving sourceTree references while
    # building up the tree tree view for UI display.
    # Any values set globally are applied to all configurations, then any
    # per-configuration values are applied.
    for xck, xcv in self.build_file_dict.get('xcode_settings', {}).iteritems():
      xccl.SetBuildSetting(xck, xcv)
    if 'xcode_config_file' in self.build_file_dict:
      config_ref = self.project.AddOrGetFileInRootGroup(
          self.build_file_dict['xcode_config_file'])
      xccl.SetBaseConfiguration(config_ref)
    build_file_configurations = self.build_file_dict.get('configurations', {})
    if build_file_configurations:
      for config_name in configurations:
        build_file_configuration_named = \
            build_file_configurations.get(config_name, {})
        if build_file_configuration_named:
          xcc = xccl.ConfigurationNamed(config_name)
          for xck, xcv in build_file_configuration_named.get('xcode_settings',
                                                             {}).iteritems():
            xcc.SetBuildSetting(xck, xcv)
          if 'xcode_config_file' in build_file_configuration_named:
            config_ref = self.project.AddOrGetFileInRootGroup(
                build_file_configurations[config_name]['xcode_config_file'])
            xcc.SetBaseConfiguration(config_ref)

    # Sort the targets based on how they appeared in the input.
    # TODO(mark): Like a lot of other things here, this assumes internal
    # knowledge of PBXProject - in this case, of its "targets" property.

    # ordinary_targets are ordinary targets that are already in the project
    # file.  run_test_targets are the targets that run unittests and should
    # be used for the Run All Tests target.  support_targets are the
    # action/rule targets used by GYP file targets, just kept for the assert
    # check.
    ordinary_targets = []
    run_test_targets = []
    support_targets = []

    # targets is full list of targets in the project.
    targets = []

    # Does this project define its own "all" target?
    has_custom_all = False

    # targets_for_all is the list of ordinary_targets that should be listed
    # in this project's "All" target.  It includes each non_runtest_target
    # that does not have suppress_wildcard set.
    targets_for_all = []

    for target in self.build_file_dict['targets']:
      target_name = target['target_name']
      toolset = target['toolset']
      qualified_target = gyp.common.QualifiedTarget(self.gyp_path, target_name,
                                                    toolset)
      xcode_target = xcode_targets[qualified_target]
      # Make sure that the target being added to the sorted list is already
      # in the unsorted list.
      assert xcode_target in self.project._properties['targets']
      targets.append(xcode_target)
      ordinary_targets.append(xcode_target)
      if xcode_target.support_target:
        support_targets.append(xcode_target.support_target)
        targets.append(xcode_target.support_target)
      if not int(target.get('suppress_wildcard', False)):
        targets_for_all.append(xcode_target)

      if target_name.lower() == 'all':
        has_custom_all = True;

      # If this target has a 'run_as' attribute, add its target to the
      # targets, and add it to the test targets.
      if target.get('run_as'):
        # Make a target to run something.  It should have one
        # dependency, the parent xcode target.
        xccl = CreateXCConfigurationList(configurations)
        run_target = gyp.xcodeproj_file.PBXAggregateTarget({
              'name':                   'Run ' + target_name,
              'productName':            xcode_target.GetProperty('productName'),
              'buildConfigurationList': xccl,
            },
            parent=self.project)
        run_target.AddDependency(xcode_target)

        command = target['run_as']
        script = ''
        if command.get('working_directory'):
          script = script + 'cd "%s"\n' % \
                   gyp.xcodeproj_file.ConvertVariablesToShellSyntax(
                       command.get('working_directory'))

        if command.get('environment'):
          script = script + "\n".join(
            ['export %s="%s"' %
             (key, gyp.xcodeproj_file.ConvertVariablesToShellSyntax(val))
             for (key, val) in command.get('environment').iteritems()]) + "\n"

        # Some test end up using sockets, files on disk, etc. and can get
        # confused if more then one test runs at a time.  The generator
        # flag 'xcode_serialize_all_test_runs' controls the forcing of all
        # tests serially.  It defaults to True.  To get serial runs this
        # little bit of python does the same as the linux flock utility to
        # make sure only one runs at a time.
        command_prefix = ''
        if serialize_all_tests:
          command_prefix = \
"""python -c "import fcntl, subprocess, sys
file = open('$TMPDIR/GYP_serialize_test_runs', 'a')
fcntl.flock(file.fileno(), fcntl.LOCK_EX)
sys.exit(subprocess.call(sys.argv[1:]))" """

        # If we were unable to exec for some reason, we want to exit
        # with an error, and fixup variable references to be shell
        # syntax instead of xcode syntax.
        script = script + 'exec ' + command_prefix + '%s\nexit 1\n' % \
                 gyp.xcodeproj_file.ConvertVariablesToShellSyntax(
                     gyp.common.EncodePOSIXShellList(command.get('action')))

        ssbp = gyp.xcodeproj_file.PBXShellScriptBuildPhase({
              'shellScript': script,
              'showEnvVarsInLog': 0,
            })
        run_target.AppendProperty('buildPhases', ssbp)

        # Add the run target to the project file.
        targets.append(run_target)
        run_test_targets.append(run_target)
        xcode_target.test_runner = run_target

    # Make sure that the list of targets being replaced is the same length
    # as the one replacing it, but allow for the added test runner targets.
    assert len(self.project._properties['targets']) == \
           len(ordinary_targets) + len(support_targets)
    self.project._properties['targets'] = targets

    # Get rid of unnecessary levels of depth in groups like the Source group.
    self.project.RootGroupsTakeOverOnlyChildren(True)

    # Sort the groups nicely.  Do this after sorting the targets, because the
    # Products group is sorted based on the order of the targets.
    self.project.SortGroups()

    # Create an "All" target if there's more than one target in this project
    # file and the project didn't define its own "All" target.  Put a
    # generated "All" target first so that people opening up the project for
    # the first time will build everything by default.
    if len(targets_for_all) > 1 and not has_custom_all:
      xccl = CreateXCConfigurationList(configurations)
      all_target = gyp.xcodeproj_file.PBXAggregateTarget(
          {
            'buildConfigurationList': xccl,
            'name':                   'All',
          },
          parent=self.project)

      for target in targets_for_all:
        all_target.AddDependency(target)

      # TODO(mark): This is evil because it relies on internal knowledge of
      # PBXProject._properties.  It's important to get the "All" target
      # first, though.
      self.project._properties['targets'].insert(0, all_target)

    # The same, but for run_test_targets.
    if len(run_test_targets) > 1:
      xccl = CreateXCConfigurationList(configurations)
      run_all_tests_target = gyp.xcodeproj_file.PBXAggregateTarget(
          {
            'buildConfigurationList': xccl,
            'name':                   'Run All Tests',
          },
          parent=self.project)
      for run_test_target in run_test_targets:
        run_all_tests_target.AddDependency(run_test_target)

      # Insert after the "All" target, which must exist if there is more
      # than one run_test_target.
      self.project._properties['targets'].insert(1, run_all_tests_target)
  def Finalize2(self, xcode_targets, xcode_target_to_target_dict):
    """Second pass: dependents-test-runners, remote references, object IDs.

    xcode_target_to_target_dict is unused here; presumably kept for
    interface parity with other generators -- TODO confirm.
    """
    # Finalize2 needs to happen in a separate step because the process of
    # updating references to other projects depends on the ordering of
    # targets within remote project files.  Finalize1 is responsible for
    # sorting duty, and once all project files are sorted, Finalize2 can come
    # in and update these references.

    # To support making a "test runner" target that will run all the tests
    # that are direct dependents of any given target, we look for
    # xcode_create_dependents_test_runner being set on an Aggregate target,
    # and generate a second target that will run the tests runners found
    # under the marked target.
    for bf_tgt in self.build_file_dict['targets']:
      if int(bf_tgt.get('xcode_create_dependents_test_runner', 0)):
        tgt_name = bf_tgt['target_name']
        toolset = bf_tgt['toolset']
        qualified_target = gyp.common.QualifiedTarget(self.gyp_path,
                                                      tgt_name, toolset)
        xcode_target = xcode_targets[qualified_target]
        if isinstance(xcode_target, gyp.xcodeproj_file.PBXAggregateTarget):
          # Collect all the run test targets.
          all_run_tests = []
          pbxtds = xcode_target.GetProperty('dependencies')
          for pbxtd in pbxtds:
            pbxcip = pbxtd.GetProperty('targetProxy')
            dependency_xct = pbxcip.GetProperty('remoteGlobalIDString')
            # test_runner is attached to targets by Finalize1's 'run_as'
            # handling.
            if hasattr(dependency_xct, 'test_runner'):
              all_run_tests.append(dependency_xct.test_runner)

          # Directly depend on all the runners as they depend on the target
          # that builds them.
          if len(all_run_tests) > 0:
            run_all_target = gyp.xcodeproj_file.PBXAggregateTarget({
                  'name':        'Run %s Tests' % tgt_name,
                  'productName': tgt_name,
                },
                parent=self.project)
            for run_test_target in all_run_tests:
              run_all_target.AddDependency(run_test_target)

            # Insert the test runner after the related target.
            idx = self.project._properties['targets'].index(xcode_target)
            self.project._properties['targets'].insert(idx + 1,
                                                       run_all_target)

    # Update all references to other projects, to make sure that the lists
    # of remote products are complete.  Otherwise, Xcode will fill them in
    # when it opens the project file, which will result in unnecessary diffs.
    # TODO(mark): This is evil because it relies on internal knowledge of
    # PBXProject._other_pbxprojects.
    for other_pbxproject in self.project._other_pbxprojects.keys():
      self.project.AddOrGetProjectReference(other_pbxproject)

    self.project.SortRemoteProductReferences()

    # Give everything an ID.
    self.project_file.ComputeIDs()

    # Make sure that no two objects in the project file have the same ID.
    # If multiple objects wind up with the same ID, upon loading the file,
    # Xcode will only recognize one object (the last one in the file?) and
    # the results are unpredictable.
    self.project_file.EnsureNoIDCollisions()
  def Write(self):
    """Writes project.pbxproj, replacing the old file only if it changed."""
    # Write the project file to a temporary location first.  Xcode watches
    # for changes to the project file and presents a UI sheet offering to
    # reload the project when it does change.  However, in some cases,
    # especially when multiple projects are open or when Xcode is busy,
    # things don't work so seamlessly.  Sometimes, Xcode is able to detect
    # that a project file has changed but can't unload it because something
    # else is referencing it.  To mitigate this problem, and to avoid even
    # having Xcode present the UI sheet when an open project is rewritten for
    # inconsequential changes, the project file is written to a temporary
    # file in the xcodeproj directory first.  The new temporary file is then
    # compared to the existing project file, if any.  If they differ, the new
    # file replaces the old; otherwise, the new project file is simply
    # deleted.  Xcode properly detects a file being renamed over an open
    # project file as a change and so it remains able to present the
    # "project file changed" sheet under this system.  Writing to a
    # temporary file first also avoids the possible problem of Xcode
    # rereading an incomplete project file.
    (output_fd, new_pbxproj_path) = \
        tempfile.mkstemp(suffix='.tmp', prefix='project.pbxproj.gyp.',
                         dir=self.path)

    try:
      output_file = os.fdopen(output_fd, 'wb')
      self.project_file.Print(output_file)
      output_file.close()

      pbxproj_path = os.path.join(self.path, 'project.pbxproj')

      same = False
      try:
        same = filecmp.cmp(pbxproj_path, new_pbxproj_path, False)
      except OSError, e:
        # ENOENT: no pre-existing project file; treat as "different".
        if e.errno != errno.ENOENT:
          raise

      if same:
        # The new file is identical to the old one, just get rid of the new
        # one.
        os.unlink(new_pbxproj_path)
      else:
        # The new file is different from the old one, or there is no old
        # one.  Rename the new file to the permanent name.
        #
        # tempfile.mkstemp uses an overly restrictive mode, resulting in a
        # file that can only be read by the owner, regardless of the umask.
        # There's no reason to not respect the umask here, which means that
        # an extra hoop is required to fetch it and reset the new file's
        # mode.
        #
        # No way to get the umask without setting a new one?  Set a safe one
        # and then set it back to the old value.
        umask = os.umask(077)
        os.umask(umask)

        os.chmod(new_pbxproj_path, 0666 & ~umask)
        os.rename(new_pbxproj_path, pbxproj_path)

    except Exception:
      # Don't leave turds behind.  In fact, if this code was responsible for
      # creating the xcodeproj directory, get rid of that too.
      os.unlink(new_pbxproj_path)
      if self.created_dir:
        shutil.rmtree(self.path, True)
      raise
def AddSourceToTarget(source, type, pbxp, xct):
  """Files a source path into the right build phase of target xct.

  Compilable sources go to the Sources phase, linkable artifacts to the
  Frameworks phase; everything else is merely added to the project's root
  group without joining any build phase.
  """
  # TODO(mark): Perhaps source_extensions and library_extensions can be made
  # a little bit fancier.
  source_extensions = ('c', 'cc', 'cpp', 'cxx', 'm', 'mm', 's', 'swift')

  # .o is conceptually more of a "source" than a "library," but Xcode thinks
  # of "sources" as things to compile and "libraries" (or "frameworks") as
  # things to link with.  Adding an object file to an Xcode target's
  # frameworks phase works properly.
  library_extensions = ('a', 'dylib', 'framework', 'o')

  ext = posixpath.splitext(posixpath.basename(source))[1]
  if ext:
    ext = ext[1:].lower()

  if type != 'none' and ext in source_extensions:
    xct.SourcesPhase().AddFile(source)
  elif type != 'none' and ext in library_extensions:
    xct.FrameworksPhase().AddFile(source)
  else:
    # Files that aren't added to a sources or frameworks build phase can
    # still go into the project file, just not as part of a build phase.
    pbxp.AddOrGetFileInRootGroup(source)
def AddResourceToTarget(resource, pbxp, xct):
  """Adds resource to the Resources build phase of xct.

  pbxp is accepted but unused here; it keeps the signature parallel to
  AddSourceToTarget so the helpers can be called uniformly.
  """
  # TODO(mark): Combine with AddSourceToTarget above? Or just inline this call
  # where it's used.
  xct.ResourcesPhase().AddFile(resource)
def AddHeaderToTarget(header, pbxp, xct, is_public):
  """Adds header to the Headers build phase of xct, marked Public or Private.

  pbxp is accepted but unused here; it keeps the signature parallel to
  AddSourceToTarget so the helpers can be called uniformly.
  """
  # TODO(mark): Combine with AddSourceToTarget above? Or just inline this call
  # where it's used.
  visibility = 'Public' if is_public else 'Private'
  settings = '{ATTRIBUTES = (%s, ); }' % visibility
  xct.HeadersPhase().AddFile(header, settings)
_xcode_variable_re = re.compile(r'(\$\((.*?)\))')


def ExpandXcodeVariables(string, expansions):
  """Expands Xcode-style $(VARIABLES) in string per the expansions dict.

  In some rare cases, it is appropriate to expand Xcode variables when a
  project file is generated.  For any substring $(VAR) in string, if VAR is a
  key in the expansions dict, every occurrence of $(VAR) will be replaced with
  expansions[VAR].  Any $(VAR) substring in string for which VAR is not a key
  in the expansions dict will remain in the returned string.
  """
  # findall returns a (possibly empty) list of (to_replace, variable) tuples;
  # the old "matches == None" check was dead code and has been removed.
  matches = _xcode_variable_re.findall(string)
  # Process matches from right to left, as the original code did.  Each
  # replacement below affects all occurrences, so the order does not change
  # the result; the reversal is kept for parity.
  matches.reverse()
  for (to_replace, variable) in matches:
    if variable not in expansions:
      continue
    # str.replace inserts the expansion text literally.  The previous
    # re.sub(re.escape(to_replace), replacement, string) treated the
    # replacement as a regex template, so expansions containing backslashes
    # (e.g. Windows paths or r'\1') were corrupted or raised re.error.
    string = string.replace(to_replace, expansions[variable])
  return string
_xcode_define_re = re.compile(r'([\\\"\' ])')


def EscapeXcodeDefine(s):
  """Escapes a preprocessor define for Xcode.

  We must escape the defines that we give to XCode so that it knows not to
  split on spaces and to respect backslash and quote literals.  However, we
  must not quote the define, or Xcode will incorrectly interpret variables
  especially $(inherited).
  """
  return _xcode_define_re.sub(r'\\\1', s)
def PerformBuild(data, configurations, params):
  """Runs xcodebuild once per requested configuration for each generated
  .xcodeproj.

  Args:
    data: Dict mapping build-file paths to their parsed gyp contents; only
        keys ending in '.gyp' are built.
    configurations: List of configuration names to pass to xcodebuild.
    params: Generator parameters; params['options'] supplies the project
        suffix and optional generator_output directory.

  Raises:
    subprocess.CalledProcessError: if an xcodebuild invocation fails.
  """
  options = params['options']
  # NOTE: print(...) with a single argument and dict.items() are used instead
  # of the Python-2-only print statement / iteritems() so this function parses
  # and behaves identically under both Python 2 and Python 3.
  for build_file, build_file_dict in data.items():
    (build_file_root, build_file_ext) = os.path.splitext(build_file)
    if build_file_ext != '.gyp':
      continue
    # Mirror the project path computed at generation time.
    xcodeproj_path = build_file_root + options.suffix + '.xcodeproj'
    if options.generator_output:
      xcodeproj_path = os.path.join(options.generator_output, xcodeproj_path)
    for config in configurations:
      arguments = ['xcodebuild', '-project', xcodeproj_path]
      arguments += ['-configuration', config]
      print("Building [%s]: %s" % (config, arguments))
      subprocess.check_call(arguments)
def CalculateGeneratorInputInfo(params):
  """Computes the toplevel and qualified output directories for generator
  input bookkeeping and publishes them in the module-level
  generator_filelist_paths dict.

  For the xcode-ninja flavor the output lands under the ninja output
  directory ('gypfiles-xcode-ninja'); otherwise it lands under
  <toplevel>/xcodebuild/gypfiles.
  """
  options = params['options']
  toplevel = options.toplevel_dir
  if params.get('flavor') == 'ninja':
    generator_dir = os.path.relpath(options.generator_output or '.')
    flags = params.get('generator_flags', {})
    out_dir = os.path.normpath(
        os.path.join(generator_dir, flags.get('output_dir', 'out')))
    qualified_out_dir = os.path.normpath(
        os.path.join(toplevel, out_dir, 'gypfiles-xcode-ninja'))
  else:
    out_dir = os.path.normpath(os.path.join(toplevel, 'xcodebuild'))
    qualified_out_dir = os.path.normpath(
        os.path.join(toplevel, out_dir, 'gypfiles'))
  global generator_filelist_paths
  generator_filelist_paths = {
      'toplevel': toplevel,
      'qualified_out_dir': qualified_out_dir,
  }
def GenerateOutput(target_list, target_dicts, data, params):
  """Generates one .xcodeproj per input .gyp file and one Xcode target per
  gyp target, then finalizes and writes the projects to disk.

  Args:
    target_list: List of qualified target names ('build_file:target#toolset').
    target_dicts: Dict mapping qualified target names to target specs.
    data: Dict mapping build-file paths to their parsed gyp contents.
    params: Generator parameters; 'options', 'generator_flags' and 'flavor'
        are consulted.

  Raises:
    Exception: if a target uses a toolset other than 'target', or if a
        target's type/bundle combination has no known Xcode product type.
  """
  # Optionally configure each spec to use ninja as the external builder.
  ninja_wrapper = params.get('flavor') == 'ninja'
  if ninja_wrapper:
    (target_list, target_dicts, data) = \
        gyp.xcode_ninja.CreateWrapper(target_list, target_dicts, data, params)

  options = params['options']
  generator_flags = params.get('generator_flags', {})
  parallel_builds = generator_flags.get('xcode_parallel_builds', True)
  serialize_all_tests = \
      generator_flags.get('xcode_serialize_all_test_runs', True)
  upgrade_check_project_version = \
      generator_flags.get('xcode_upgrade_check_project_version', None)

  # Format upgrade_check_project_version with leading zeros as needed.
  if upgrade_check_project_version:
    upgrade_check_project_version = str(upgrade_check_project_version)
    while len(upgrade_check_project_version) < 4:
      upgrade_check_project_version = '0' + upgrade_check_project_version

  skip_excluded_files = \
      not generator_flags.get('xcode_list_excluded_files', True)

  # First pass: create one XcodeProject wrapper per .gyp build file and set
  # project-level attributes.
  xcode_projects = {}
  for build_file, build_file_dict in data.iteritems():
    (build_file_root, build_file_ext) = os.path.splitext(build_file)
    if build_file_ext != '.gyp':
      continue
    xcodeproj_path = build_file_root + options.suffix + '.xcodeproj'
    if options.generator_output:
      xcodeproj_path = os.path.join(options.generator_output, xcodeproj_path)
    xcp = XcodeProject(build_file, xcodeproj_path, build_file_dict)
    xcode_projects[build_file] = xcp
    pbxp = xcp.project

    # Set project-level attributes from multiple options
    project_attributes = {};
    if parallel_builds:
      project_attributes['BuildIndependentTargetsInParallel'] = 'YES'
    if upgrade_check_project_version:
      project_attributes['LastUpgradeCheck'] = upgrade_check_project_version
      project_attributes['LastTestingUpgradeCheck'] = \
          upgrade_check_project_version
      project_attributes['LastSwiftUpdateCheck'] = \
          upgrade_check_project_version
    pbxp.SetProperty('attributes', project_attributes)

    # Add gyp/gypi files to project
    if not generator_flags.get('standalone'):
      main_group = pbxp.GetProperty('mainGroup')
      build_group = gyp.xcodeproj_file.PBXGroup({'name': 'Build'})
      main_group.AppendChild(build_group)
      for included_file in build_file_dict['included_files']:
        build_group.AddOrGetFileByPath(included_file, False)

  # Second pass: create one Xcode target per qualified gyp target, wiring up
  # build phases, actions, rules, copies, dependencies and configurations.
  xcode_targets = {}
  xcode_target_to_target_dict = {}
  for qualified_target in target_list:
    [build_file, target_name, toolset] = \
        gyp.common.ParseQualifiedTarget(qualified_target)

    spec = target_dicts[qualified_target]
    if spec['toolset'] != 'target':
      raise Exception(
          'Multiple toolsets not supported in xcode build (target %s)' %
          qualified_target)
    configuration_names = [spec['default_configuration']]
    for configuration_name in sorted(spec['configurations'].keys()):
      if configuration_name not in configuration_names:
        configuration_names.append(configuration_name)
    xcp = xcode_projects[build_file]
    pbxp = xcp.project

    # Set up the configurations for the target according to the list of names
    # supplied.
    xccl = CreateXCConfigurationList(configuration_names)

    # Create an XCTarget subclass object for the target. The type with
    # "+bundle" appended will be used if the target has "mac_bundle" set.
    # loadable_modules not in a mac_bundle are mapped to
    # com.googlecode.gyp.xcode.bundle, a pseudo-type that xcode.py interprets
    # to create a single-file mh_bundle.
    _types = {
        'executable': 'com.apple.product-type.tool',
        'loadable_module': 'com.googlecode.gyp.xcode.bundle',
        'shared_library': 'com.apple.product-type.library.dynamic',
        'static_library': 'com.apple.product-type.library.static',
        'mac_kernel_extension': 'com.apple.product-type.kernel-extension',
        'executable+bundle': 'com.apple.product-type.application',
        'loadable_module+bundle': 'com.apple.product-type.bundle',
        'loadable_module+xctest': 'com.apple.product-type.bundle.unit-test',
        'loadable_module+xcuitest': 'com.apple.product-type.bundle.ui-testing',
        'shared_library+bundle': 'com.apple.product-type.framework',
        'executable+extension+bundle': 'com.apple.product-type.app-extension',
        'executable+watch+extension+bundle':
            'com.apple.product-type.watchkit-extension',
        'executable+watch+bundle':
            'com.apple.product-type.application.watchapp',
        'mac_kernel_extension+bundle': 'com.apple.product-type.kernel-extension',
    }

    target_properties = {
        'buildConfigurationList': xccl,
        'name': target_name,
    }

    # Gather the type and bundle-related flags that together select the Xcode
    # product type key used for the _types lookup below.
    type = spec['type']
    is_xctest = int(spec.get('mac_xctest_bundle', 0))
    is_xcuitest = int(spec.get('mac_xcuitest_bundle', 0))
    is_bundle = int(spec.get('mac_bundle', 0)) or is_xctest
    is_app_extension = int(spec.get('ios_app_extension', 0))
    is_watchkit_extension = int(spec.get('ios_watchkit_extension', 0))
    is_watch_app = int(spec.get('ios_watch_app', 0))
    if type != 'none':
      type_bundle_key = type
      if is_xcuitest:
        type_bundle_key += '+xcuitest'
        assert type == 'loadable_module', (
            'mac_xcuitest_bundle targets must have type loadable_module '
            '(target %s)' % target_name)
      elif is_xctest:
        type_bundle_key += '+xctest'
        assert type == 'loadable_module', (
            'mac_xctest_bundle targets must have type loadable_module '
            '(target %s)' % target_name)
      elif is_app_extension:
        assert is_bundle, ('ios_app_extension flag requires mac_bundle '
            '(target %s)' % target_name)
        type_bundle_key += '+extension+bundle'
      elif is_watchkit_extension:
        assert is_bundle, ('ios_watchkit_extension flag requires mac_bundle '
            '(target %s)' % target_name)
        type_bundle_key += '+watch+extension+bundle'
      elif is_watch_app:
        assert is_bundle, ('ios_watch_app flag requires mac_bundle '
            '(target %s)' % target_name)
        type_bundle_key += '+watch+bundle'
      elif is_bundle:
        type_bundle_key += '+bundle'

      xctarget_type = gyp.xcodeproj_file.PBXNativeTarget
      try:
        target_properties['productType'] = _types[type_bundle_key]
      except KeyError, e:
        gyp.common.ExceptionAppend(e, "-- unknown product type while "
                                   "writing target %s" % target_name)
        raise
    else:
      xctarget_type = gyp.xcodeproj_file.PBXAggregateTarget
      assert not is_bundle, (
          'mac_bundle targets cannot have type none (target "%s")' %
          target_name)
      assert not is_xcuitest, (
          'mac_xcuitest_bundle targets cannot have type none (target "%s")' %
          target_name)
      assert not is_xctest, (
          'mac_xctest_bundle targets cannot have type none (target "%s")' %
          target_name)

    target_product_name = spec.get('product_name')
    if target_product_name is not None:
      target_properties['productName'] = target_product_name

    xct = xctarget_type(target_properties, parent=pbxp,
                        force_outdir=spec.get('product_dir'),
                        force_prefix=spec.get('product_prefix'),
                        force_extension=spec.get('product_extension'))
    pbxp.AppendProperty('targets', xct)
    xcode_targets[qualified_target] = xct
    xcode_target_to_target_dict[xct] = spec

    spec_actions = spec.get('actions', [])
    spec_rules = spec.get('rules', [])

    # Xcode has some "issues" with checking dependencies for the "Compile
    # sources" step with any source files/headers generated by actions/rules.
    # To work around this, if a target is building anything directly (not
    # type "none"), then a second target is used to run the GYP actions/rules
    # and is made a dependency of this target.  This way the work is done
    # before the dependency checks for what should be recompiled.
    support_xct = None
    # The Xcode "issues" don't affect xcode-ninja builds, since the dependency
    # logic all happens in ninja.  Don't bother creating the extra targets in
    # that case.
    if type != 'none' and (spec_actions or spec_rules) and not ninja_wrapper:
      support_xccl = CreateXCConfigurationList(configuration_names);
      support_target_suffix = generator_flags.get(
          'support_target_suffix', ' Support')
      support_target_properties = {
          'buildConfigurationList': support_xccl,
          'name': target_name + support_target_suffix,
      }
      if target_product_name:
        support_target_properties['productName'] = \
            target_product_name + ' Support'
      support_xct = \
          gyp.xcodeproj_file.PBXAggregateTarget(support_target_properties,
                                                parent=pbxp)
      pbxp.AppendProperty('targets', support_xct)
      xct.AddDependency(support_xct)
      # Hang the support target off the main target so it can be tested/found
      # by the generator during Finalize.
      xct.support_target = support_xct

    prebuild_index = 0

    # Add custom shell script phases for "actions" sections.
    for action in spec_actions:
      # There's no need to write anything into the script to ensure that the
      # output directories already exist, because Xcode will look at the
      # declared outputs and automatically ensure that they exist for us.

      # Do we have a message to print when this action runs?
      message = action.get('message')
      if message:
        message = 'echo note: ' + gyp.common.EncodePOSIXShellArgument(message)
      else:
        message = ''

      # Turn the list into a string that can be passed to a shell.
      action_string = gyp.common.EncodePOSIXShellList(action['action'])

      # Convert Xcode-type variable references to sh-compatible environment
      # variable references.
      message_sh = gyp.xcodeproj_file.ConvertVariablesToShellSyntax(message)
      action_string_sh = gyp.xcodeproj_file.ConvertVariablesToShellSyntax(
          action_string)

      script = ''
      # Include the optional message
      if message_sh:
        script += message_sh + '\n'
      # Be sure the script runs in exec, and that if exec fails, the script
      # exits signalling an error.
      script += 'exec ' + action_string_sh + '\nexit 1\n'
      ssbp = gyp.xcodeproj_file.PBXShellScriptBuildPhase({
            'inputPaths': action['inputs'],
            'name': 'Action "' + action['action_name'] + '"',
            'outputPaths': action['outputs'],
            'shellScript': script,
            'showEnvVarsInLog': 0,
          })

      if support_xct:
        support_xct.AppendProperty('buildPhases', ssbp)
      else:
        # TODO(mark): this assumes too much knowledge of the internals of
        # xcodeproj_file; some of these smarts should move into xcodeproj_file
        # itself.
        xct._properties['buildPhases'].insert(prebuild_index, ssbp)
        prebuild_index = prebuild_index + 1

      # TODO(mark): Should verify that at most one of these is specified.
      if int(action.get('process_outputs_as_sources', False)):
        for output in action['outputs']:
          AddSourceToTarget(output, type, pbxp, xct)

      if int(action.get('process_outputs_as_mac_bundle_resources', False)):
        for output in action['outputs']:
          AddResourceToTarget(output, pbxp, xct)

    # tgt_mac_bundle_resources holds the list of bundle resources so
    # the rule processing can check against it.
    if is_bundle:
      tgt_mac_bundle_resources = spec.get('mac_bundle_resources', [])
    else:
      tgt_mac_bundle_resources = []

    # Add custom shell script phases driving "make" for "rules" sections.
    #
    # Xcode's built-in rule support is almost powerful enough to use directly,
    # but there are a few significant deficiencies that render them unusable.
    # There are workarounds for some of its inadequacies, but in aggregate,
    # the workarounds added complexity to the generator, and some workarounds
    # actually require input files to be crafted more carefully than I'd like.
    # Consequently, until Xcode rules are made more capable, "rules" input
    # sections will be handled in Xcode output by shell script build phases
    # performed prior to the compilation phase.
    #
    # The following problems with Xcode rules were found.  The numbers are
    # Apple radar IDs.  I hope that these shortcomings are addressed, I really
    # liked having the rules handled directly in Xcode during the period that
    # I was prototyping this.
    #
    # 6588600 Xcode compiles custom script rule outputs too soon, compilation
    #         fails.  This occurs when rule outputs from distinct inputs are
    #         interdependent.  The only workaround is to put rules and their
    #         inputs in a separate target from the one that compiles the rule
    #         outputs.  This requires input file cooperation and it means that
    #         process_outputs_as_sources is unusable.
    # 6584932 Need to declare that custom rule outputs should be excluded from
    #         compilation.  A possible workaround is to lie to Xcode about a
    #         rule's output, giving it a dummy file it doesn't know how to
    #         compile.  The rule action script would need to touch the dummy.
    # 6584839 I need a way to declare additional inputs to a custom rule.
    #         A possible workaround is a shell script phase prior to
    #         compilation that touches a rule's primary input files if any
    #         would-be additional inputs are newer than the output.  Modifying
    #         the source tree - even just modification times - feels dirty.
    # 6564240 Xcode "custom script" build rules always dump all environment
    #         variables.  This is a low-prioroty problem and is not a
    #         show-stopper.
    rules_by_ext = {}
    for rule in spec_rules:
      rules_by_ext[rule['extension']] = rule

      # First, some definitions:
      #
      # A "rule source" is a file that was listed in a target's "sources"
      # list and will have a rule applied to it on the basis of matching the
      # rule's "extensions" attribute.  Rule sources are direct inputs to
      # rules.
      #
      # Rule definitions may specify additional inputs in their "inputs"
      # attribute.  These additional inputs are used for dependency tracking
      # purposes.
      #
      # A "concrete output" is a rule output with input-dependent variables
      # resolved.  For example, given a rule with:
      #   'extension': 'ext', 'outputs': ['$(INPUT_FILE_BASE).cc'],
      # if the target's "sources" list contained "one.ext" and "two.ext",
      # the "concrete output" for rule input "two.ext" would be "two.cc".  If
      # a rule specifies multiple outputs, each input file that the rule is
      # applied to will have the same number of concrete outputs.
      #
      # If any concrete outputs are outdated or missing relative to their
      # corresponding rule_source or to any specified additional input, the
      # rule action must be performed to generate the concrete outputs.

      # concrete_outputs_by_rule_source will have an item at the same index
      # as the rule['rule_sources'] that it corresponds to.  Each item is a
      # list of all of the concrete outputs for the rule_source.
      concrete_outputs_by_rule_source = []

      # concrete_outputs_all is a flat list of all concrete outputs that this
      # rule is able to produce, given the known set of input files
      # (rule_sources) that apply to it.
      concrete_outputs_all = []

      # messages & actions are keyed by the same indices as rule['rule_sources']
      # and concrete_outputs_by_rule_source.  They contain the message and
      # action to perform after resolving input-dependent variables.  The
      # message is optional, in which case None is stored for each rule source.
      messages = []
      actions = []

      for rule_source in rule.get('rule_sources', []):
        rule_source_dirname, rule_source_basename = \
            posixpath.split(rule_source)
        (rule_source_root, rule_source_ext) = \
            posixpath.splitext(rule_source_basename)

        # These are the same variable names that Xcode uses for its own native
        # rule support.  Because Xcode's rule engine is not being used, they
        # need to be expanded as they are written to the makefile.
        rule_input_dict = {
            'INPUT_FILE_BASE': rule_source_root,
            'INPUT_FILE_SUFFIX': rule_source_ext,
            'INPUT_FILE_NAME': rule_source_basename,
            'INPUT_FILE_PATH': rule_source,
            'INPUT_FILE_DIRNAME': rule_source_dirname,
        }

        concrete_outputs_for_this_rule_source = []
        for output in rule.get('outputs', []):
          # Fortunately, Xcode and make both use $(VAR) format for their
          # variables, so the expansion is the only transformation necessary.
          # Any remaning $(VAR)-type variables in the string can be given
          # directly to make, which will pick up the correct settings from
          # what Xcode puts into the environment.
          concrete_output = ExpandXcodeVariables(output, rule_input_dict)
          concrete_outputs_for_this_rule_source.append(concrete_output)

          # Add all concrete outputs to the project.
          pbxp.AddOrGetFileInRootGroup(concrete_output)

        concrete_outputs_by_rule_source.append( \
            concrete_outputs_for_this_rule_source)
        concrete_outputs_all.extend(concrete_outputs_for_this_rule_source)

        # TODO(mark): Should verify that at most one of these is specified.
        if int(rule.get('process_outputs_as_sources', False)):
          for output in concrete_outputs_for_this_rule_source:
            AddSourceToTarget(output, type, pbxp, xct)

        # If the file came from the mac_bundle_resources list or if the rule
        # is marked to process outputs as bundle resource, do so.
        was_mac_bundle_resource = rule_source in tgt_mac_bundle_resources
        if was_mac_bundle_resource or \
            int(rule.get('process_outputs_as_mac_bundle_resources', False)):
          for output in concrete_outputs_for_this_rule_source:
            AddResourceToTarget(output, pbxp, xct)

        # Do we have a message to print when this rule runs?
        message = rule.get('message')
        if message:
          message = gyp.common.EncodePOSIXShellArgument(message)
          message = ExpandXcodeVariables(message, rule_input_dict)
        messages.append(message)

        # Turn the list into a string that can be passed to a shell.
        action_string = gyp.common.EncodePOSIXShellList(rule['action'])

        action = ExpandXcodeVariables(action_string, rule_input_dict)
        actions.append(action)

      if len(concrete_outputs_all) > 0:
        # TODO(mark): There's a possibilty for collision here.  Consider
        # target "t" rule "A_r" and target "t_A" rule "r".
        makefile_name = '%s.make' % re.sub(
            '[^a-zA-Z0-9_]', '_' , '%s_%s' % (target_name, rule['rule_name']))
        makefile_path = os.path.join(xcode_projects[build_file].path,
                                     makefile_name)
        # TODO(mark): try/close?  Write to a temporary file and swap it only
        # if it's got changes?
        makefile = open(makefile_path, 'wb')

        # make will build the first target in the makefile by default.  By
        # convention, it's called "all".  List all (or at least one)
        # concrete output for each rule source as a prerequisite of the "all"
        # target.
        makefile.write('all: \\\n')
        for concrete_output_index in \
            xrange(0, len(concrete_outputs_by_rule_source)):
          # Only list the first (index [0]) concrete output of each input
          # in the "all" target.  Otherwise, a parallel make (-j > 1) would
          # attempt to process each input multiple times simultaneously.
          # Otherwise, "all" could just contain the entire list of
          # concrete_outputs_all.
          concrete_output = \
              concrete_outputs_by_rule_source[concrete_output_index][0]
          if concrete_output_index == len(concrete_outputs_by_rule_source) - 1:
            eol = ''
          else:
            eol = ' \\'
          makefile.write('    %s%s\n' % (concrete_output, eol))

        for (rule_source, concrete_outputs, message, action) in \
            zip(rule['rule_sources'], concrete_outputs_by_rule_source,
                messages, actions):
          makefile.write('\n')

          # Add a rule that declares it can build each concrete output of a
          # rule source.  Collect the names of the directories that are
          # required.
          concrete_output_dirs = []
          for concrete_output_index in xrange(0, len(concrete_outputs)):
            concrete_output = concrete_outputs[concrete_output_index]
            if concrete_output_index == 0:
              bol = ''
            else:
              bol = '    '
            makefile.write('%s%s \\\n' % (bol, concrete_output))

            concrete_output_dir = posixpath.dirname(concrete_output)
            if (concrete_output_dir and
                concrete_output_dir not in concrete_output_dirs):
              concrete_output_dirs.append(concrete_output_dir)

          makefile.write('    : \\\n')

          # The prerequisites for this rule are the rule source itself and
          # the set of additional rule inputs, if any.
          prerequisites = [rule_source]
          prerequisites.extend(rule.get('inputs', []))
          for prerequisite_index in xrange(0, len(prerequisites)):
            prerequisite = prerequisites[prerequisite_index]
            if prerequisite_index == len(prerequisites) - 1:
              eol = ''
            else:
              eol = ' \\'
            makefile.write('    %s%s\n' % (prerequisite, eol))

          # Make sure that output directories exist before executing the rule
          # action.
          if len(concrete_output_dirs) > 0:
            makefile.write('\t@mkdir -p "%s"\n' %
                           '" "'.join(concrete_output_dirs))

          # The rule message and action have already had the necessary variable
          # substitutions performed.
          if message:
            # Mark it with note: so Xcode picks it up in build output.
            makefile.write('\t@echo note: %s\n' % message)
          makefile.write('\t%s\n' % action)

        makefile.close()

        # It might be nice to ensure that needed output directories exist
        # here rather than in each target in the Makefile, but that wouldn't
        # work if there ever was a concrete output that had an input-dependent
        # variable anywhere other than in the leaf position.

        # Don't declare any inputPaths or outputPaths.  If they're present,
        # Xcode will provide a slight optimization by only running the script
        # phase if any output is missing or outdated relative to any input.
        # Unfortunately, it will also assume that all outputs are touched by
        # the script, and if the outputs serve as files in a compilation
        # phase, they will be unconditionally rebuilt.  Since make might not
        # rebuild everything that could be declared here as an output, this
        # extra compilation activity is unnecessary.  With inputPaths and
        # outputPaths not supplied, make will always be called, but it knows
        # enough to not do anything when everything is up-to-date.

        # To help speed things up, pass -j COUNT to make so it does some work
        # in parallel.  Don't use ncpus because Xcode will build ncpus targets
        # in parallel and if each target happens to have a rules step, there
        # would be ncpus^2 things going.  With a machine that has 2 quad-core
        # Xeons, a build can quickly run out of processes based on
        # scheduling/other tasks, and randomly failing builds are no good.
        script = \
"""JOB_COUNT="$(/usr/sbin/sysctl -n hw.ncpu)"
if [ "${JOB_COUNT}" -gt 4 ]; then
  JOB_COUNT=4
fi
exec xcrun make -f "${PROJECT_FILE_PATH}/%s" -j "${JOB_COUNT}"
exit 1
""" % makefile_name
        ssbp = gyp.xcodeproj_file.PBXShellScriptBuildPhase({
              'name': 'Rule "' + rule['rule_name'] + '"',
              'shellScript': script,
              'showEnvVarsInLog': 0,
            })

        if support_xct:
          support_xct.AppendProperty('buildPhases', ssbp)
        else:
          # TODO(mark): this assumes too much knowledge of the internals of
          # xcodeproj_file; some of these smarts should move into xcodeproj_file
          # itself.
          xct._properties['buildPhases'].insert(prebuild_index, ssbp)
          prebuild_index = prebuild_index + 1

      # Extra rule inputs also go into the project file.  Concrete outputs were
      # already added when they were computed.
      groups = ['inputs', 'inputs_excluded']
      if skip_excluded_files:
        groups = [x for x in groups if not x.endswith('_excluded')]
      for group in groups:
        for item in rule.get(group, []):
          pbxp.AddOrGetFileInRootGroup(item)

    # Add "sources".
    for source in spec.get('sources', []):
      (source_root, source_extension) = posixpath.splitext(source)
      if source_extension[1:] not in rules_by_ext:
        # AddSourceToTarget will add the file to a root group if it's not
        # already there.
        AddSourceToTarget(source, type, pbxp, xct)
      else:
        pbxp.AddOrGetFileInRootGroup(source)

    # Add "mac_bundle_resources" and "mac_framework_private_headers" if
    # it's a bundle of any type.
    if is_bundle:
      for resource in tgt_mac_bundle_resources:
        (resource_root, resource_extension) = posixpath.splitext(resource)
        if resource_extension[1:] not in rules_by_ext:
          AddResourceToTarget(resource, pbxp, xct)
        else:
          pbxp.AddOrGetFileInRootGroup(resource)

      for header in spec.get('mac_framework_private_headers', []):
        AddHeaderToTarget(header, pbxp, xct, False)

    # Add "mac_framework_headers".  These can be valid for both frameworks
    # and static libraries.
    if is_bundle or type == 'static_library':
      for header in spec.get('mac_framework_headers', []):
        AddHeaderToTarget(header, pbxp, xct, True)

    # Add "copies".
    pbxcp_dict = {}
    for copy_group in spec.get('copies', []):
      dest = copy_group['destination']
      if dest[0] not in ('/', '$'):
        # Relative paths are relative to $(SRCROOT).
        dest = '$(SRCROOT)/' + dest

      code_sign = int(copy_group.get('xcode_code_sign', 0))
      settings = (None, '{ATTRIBUTES = (CodeSignOnCopy, ); }')[code_sign];

      # Coalesce multiple "copies" sections in the same target with the same
      # "destination" property into the same PBXCopyFilesBuildPhase, otherwise
      # they'll wind up with ID collisions.
      pbxcp = pbxcp_dict.get(dest, None)
      if pbxcp is None:
        pbxcp = gyp.xcodeproj_file.PBXCopyFilesBuildPhase({
              'name': 'Copy to ' + copy_group['destination']
            },
            parent=xct)
        pbxcp.SetDestination(dest)

        # TODO(mark): The usual comment about this knowing too much about
        # gyp.xcodeproj_file internals applies.
        xct._properties['buildPhases'].insert(prebuild_index, pbxcp)

        pbxcp_dict[dest] = pbxcp

      for file in copy_group['files']:
        pbxcp.AddFile(file, settings)

    # Excluded files can also go into the project file.
    if not skip_excluded_files:
      for key in ['sources', 'mac_bundle_resources', 'mac_framework_headers',
                  'mac_framework_private_headers']:
        excluded_key = key + '_excluded'
        for item in spec.get(excluded_key, []):
          pbxp.AddOrGetFileInRootGroup(item)

    # So can "inputs" and "outputs" sections of "actions" groups.
    groups = ['inputs', 'inputs_excluded', 'outputs', 'outputs_excluded']
    if skip_excluded_files:
      groups = [x for x in groups if not x.endswith('_excluded')]
    for action in spec.get('actions', []):
      for group in groups:
        for item in action.get(group, []):
          # Exclude anything in BUILT_PRODUCTS_DIR.  They're products, not
          # sources.
          if not item.startswith('$(BUILT_PRODUCTS_DIR)/'):
            pbxp.AddOrGetFileInRootGroup(item)

    for postbuild in spec.get('postbuilds', []):
      action_string_sh = gyp.common.EncodePOSIXShellList(postbuild['action'])
      script = 'exec ' + action_string_sh + '\nexit 1\n'
      # Make the postbuild step depend on the output of ld or ar from this
      # target.  Apparently putting the script step after the link step isn't
      # sufficient to ensure proper ordering in all cases.  With an input
      # declared but no outputs, the script step should run every time, as
      # desired.
      ssbp = gyp.xcodeproj_file.PBXShellScriptBuildPhase({
            'inputPaths': ['$(BUILT_PRODUCTS_DIR)/$(EXECUTABLE_PATH)'],
            'name': 'Postbuild "' + postbuild['postbuild_name'] + '"',
            'shellScript': script,
            'showEnvVarsInLog': 0,
          })
      xct.AppendProperty('buildPhases', ssbp)

    # Add dependencies before libraries, because adding a dependency may imply
    # adding a library.  It's preferable to keep dependencies listed first
    # during a link phase so that they can override symbols that would
    # otherwise be provided by libraries, which will usually include system
    # libraries.  On some systems, ld is finicky and even requires the
    # libraries to be ordered in such a way that unresolved symbols in
    # earlier-listed libraries may only be resolved by later-listed libraries.
    # The Mac linker doesn't work that way, but other platforms do, and so
    # their linker invocations need to be constructed in this way.  There's
    # no compelling reason for Xcode's linker invocations to differ.

    if 'dependencies' in spec:
      for dependency in spec['dependencies']:
        xct.AddDependency(xcode_targets[dependency])
        # The support project also gets the dependencies (in case they are
        # needed for the actions/rules to work).
        if support_xct:
          support_xct.AddDependency(xcode_targets[dependency])

    if 'libraries' in spec:
      for library in spec['libraries']:
        xct.FrameworksPhase().AddFile(library)
        # Add the library's directory to LIBRARY_SEARCH_PATHS if necessary.
        # I wish Xcode handled this automatically.
        library_dir = posixpath.dirname(library)
        if library_dir not in xcode_standard_library_dirs and (
            not xct.HasBuildSetting(_library_search_paths_var) or
            library_dir not in xct.GetBuildSetting(_library_search_paths_var)):
          xct.AppendBuildSetting(_library_search_paths_var, library_dir)

    for configuration_name in configuration_names:
      configuration = spec['configurations'][configuration_name]
      xcbc = xct.ConfigurationNamed(configuration_name)
      for include_dir in configuration.get('mac_framework_dirs', []):
        xcbc.AppendBuildSetting('FRAMEWORK_SEARCH_PATHS', include_dir)
      for include_dir in configuration.get('include_dirs', []):
        xcbc.AppendBuildSetting('HEADER_SEARCH_PATHS', include_dir)
      for library_dir in configuration.get('library_dirs', []):
        if library_dir not in xcode_standard_library_dirs and (
            not xcbc.HasBuildSetting(_library_search_paths_var) or
            library_dir not in xcbc.GetBuildSetting(_library_search_paths_var)):
          xcbc.AppendBuildSetting(_library_search_paths_var, library_dir)
      if 'defines' in configuration:
        for define in configuration['defines']:
          set_define = EscapeXcodeDefine(define)
          xcbc.AppendBuildSetting('GCC_PREPROCESSOR_DEFINITIONS', set_define)
      if 'xcode_settings' in configuration:
        for xck, xcv in configuration['xcode_settings'].iteritems():
          xcbc.SetBuildSetting(xck, xcv)
      if 'xcode_config_file' in configuration:
        config_ref = pbxp.AddOrGetFileInRootGroup(
            configuration['xcode_config_file'])
        xcbc.SetBaseConfiguration(config_ref)

  # Final pass: finalize and write each project.  Finalize1 happens for every
  # project before any Finalize2, so cross-project state is complete first.
  build_files = []
  for build_file, build_file_dict in data.iteritems():
    if build_file.endswith('.gyp'):
      build_files.append(build_file)

  for build_file in build_files:
    xcode_projects[build_file].Finalize1(xcode_targets, serialize_all_tests)

  for build_file in build_files:
    xcode_projects[build_file].Finalize2(xcode_targets,
                                         xcode_target_to_target_dict)

  for build_file in build_files:
    xcode_projects[build_file].Write()
| bsd-3-clause | 91b3aeead89284e784a7b3f0698f82f3 | 43.899314 | 80 | 0.657799 | 4.044733 | false | true | false | false |
adblockplus/gyp | test/standalone-static-library/gyptest-standalone-static-library.py | 47 | 1747 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies build of a static_library with the standalone_static_library flag set.
"""
import os
import subprocess
import sys
import TestGyp
# standalone_static_library currently means two things: a specific output
# location for the built target and non-thin archive files.
test = TestGyp.TestGyp()

# Verify that types other than static_library cause a failure.
test.run_gyp('invalid.gyp', status=1, stderr=None)
target_str = 'invalid.gyp:bad#target'
err = ['gyp: Target %s has type executable but standalone_static_library flag '
       'is only valid for static_library type.' % target_str]
test.must_contain_all_lines(test.stderr(), err)

# Build a valid standalone_static_library.
test.run_gyp('mylib.gyp')
test.build('mylib.gyp', target='prog')

# Verify that the static library is copied to the correct location.
# We expect the library to be copied to $PRODUCT_DIR.
# NOTE(review): test.EXECUTABLE is passed as the `type` so built_file_path
# resolves against the executable output directory — presumably $PRODUCT_DIR;
# confirm against TestGyp's built_file_path semantics.
standalone_static_library_dir = test.EXECUTABLE
path_to_lib = os.path.split(
    test.built_file_path('mylib', type=standalone_static_library_dir))[0]
lib_name = test.built_file_basename('mylib', type=test.STATIC_LIB)
path = os.path.join(path_to_lib, lib_name)
test.must_exist(path)

# Verify that the program runs properly.
expect = 'hello from mylib.c\n'
test.run_built_executable('prog', stdout=expect)

# Verify that libmylib.a contains symbols.  "ar -x" fails on a 'thin' archive,
# so extracting successfully proves the archive is a real (thick) one.
supports_thick = ('make', 'ninja', 'cmake')
if test.format in supports_thick and sys.platform.startswith('linux'):
  retcode = subprocess.call(['ar', '-x', path])
  assert retcode == 0

test.pass_test()
| bsd-3-clause | 8f02cf7ef488fff9c110dff540f7c642 | 33.94 | 79 | 0.737836 | 3.366089 | false | true | false | false |
adblockplus/gyp | test/lib/TestWin.py | 87 | 3168 | # Copyright (c) 2014 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
TestWin.py: a collection of helpers for testing on Windows.
"""
import errno
import os
import re
import sys
import subprocess
class Registry(object):
  """Read-only access to the Windows registry via reg.exe.

  reg.exe is used (rather than the win32 extension module) so that gyp stays
  Python-implementation neutral.  On non-Windows platforms every query
  harmlessly returns None / False.
  """

  def _QueryBase(self, sysdir, key, value):
    """Use reg.exe to read a particular key.

    While ideally we might use the win32 module, we would like gyp to be
    python neutral, so for instance cygwin python lacks this module.

    Arguments:
      sysdir: The system subdirectory to attempt to launch reg.exe from.
      key: The registry key to read from.
      value: The particular value to read.
    Return:
      stdout from reg.exe, or None for failure.
    """
    # Skip if not on Windows or Python Win32 setup issue
    if sys.platform not in ('win32', 'cygwin'):
      return None
    # Setup params to pass to and attempt to launch reg.exe
    cmd = [os.path.join(os.environ.get('WINDIR', ''), sysdir, 'reg.exe'),
           'query', key]
    if value:
      cmd.extend(['/v', value])
    p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    # Get the stdout from reg.exe, reading to the end so p.returncode is valid
    # Note that the error text may be in [1] in some cases
    text = p.communicate()[0]
    # Check return code from reg.exe; officially 0==success and 1==error
    if p.returncode:
      return None
    return text

  def Query(self, key, value=None):
    r"""Use reg.exe to read a particular key through _QueryBase.

    First tries to launch from %WinDir%\Sysnative to avoid WoW64 redirection. If
    that fails, it falls back to System32.  Sysnative is available on Vista and
    up and available on Windows Server 2003 and XP through KB patch 942589. Note
    that Sysnative will always fail if using 64-bit python due to it being a
    virtual directory and System32 will work correctly in the first place.
    KB 942589 - http://support.microsoft.com/kb/942589/en-us.

    Arguments:
      key: The registry key.
      value: The particular registry value to read (optional).
    Return:
      stdout from reg.exe, or None for failure.
    """
    text = None
    try:
      text = self._QueryBase('Sysnative', key, value)
    # NOTE: 'except E as e' (instead of the Python-2-only 'except E, e')
    # works on Python 2.6+ and keeps this module importable under Python 3.
    except OSError as e:
      if e.errno == errno.ENOENT:
        text = self._QueryBase('System32', key, value)
      else:
        raise
    return text

  def GetValue(self, key, value):
    """Use reg.exe to obtain the value of a registry key.

    Args:
      key: The registry key.
      value: The particular registry value to read.
    Return:
      contents of the registry key's value, or None on failure.
    """
    text = self.Query(key, value)
    if not text:
      return None
    # Extract value.
    match = re.search(r'REG_\w+\s+([^\r]+)\r\n', text)
    if not match:
      return None
    return match.group(1)

  def KeyExists(self, key):
    """Use reg.exe to see if a key exists.

    Args:
      key: The registry key to check.
    Return:
      True if the key exists
    """
    return bool(self.Query(key))
| bsd-3-clause | 50813975684abbe5f45ff6353e58bb17 | 30.366337 | 80 | 0.65846 | 3.854015 | false | false | false | false |
adblockplus/gyp | pylib/gyp/generator/dump_dependency_json.py | 1523 | 3426 | # Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import collections
import os
import gyp
import gyp.common
import gyp.msvs_emulation
import json
import sys
# Module-level flags read by gyp's input machinery for every generator.
generator_supports_multiple_toolsets = True
generator_wants_static_library_dependencies_adjusted = False

# Populated by CalculateGeneratorInputInfo() below.
generator_filelist_paths = {
}

generator_default_variables = {
}
# This generator only dumps the dependency graph, so the directory variables
# just need to be non-empty placeholders.
for dirname in ['INTERMEDIATE_DIR', 'SHARED_INTERMEDIATE_DIR', 'PRODUCT_DIR',
                'LIB_DIR', 'SHARED_LIB_DIR']:
  # Some gyp steps fail if these are empty(!).
  generator_default_variables[dirname] = 'dir'

# The remaining standard variables are unused here; empty strings suffice.
for unused in ['RULE_INPUT_PATH', 'RULE_INPUT_ROOT', 'RULE_INPUT_NAME',
               'RULE_INPUT_DIRNAME', 'RULE_INPUT_EXT',
               'EXECUTABLE_PREFIX', 'EXECUTABLE_SUFFIX',
               'STATIC_LIB_PREFIX', 'STATIC_LIB_SUFFIX',
               'SHARED_LIB_PREFIX', 'SHARED_LIB_SUFFIX',
               'CONFIGURATION_NAME']:
  generator_default_variables[unused] = ''
def CalculateVariables(default_variables, params):
  """Seed default_variables from generator flags and the target OS flavor."""
  generator_flags = params.get('generator_flags', {})
  for key, val in generator_flags.items():
    default_variables.setdefault(key, val)
  default_variables.setdefault('OS', gyp.common.GetFlavor(params))

  flavor = gyp.common.GetFlavor(params)
  if flavor =='win':
    # Copy additional generator configuration data from VS, which is shared
    # by the Windows Ninja generator.
    import gyp.generator.msvs as msvs_generator
    # NOTE(review): these two assignments create *local* variables that are
    # never used afterwards — presumably they were meant to set module-level
    # globals of the same name; confirm intent before changing.
    generator_additional_non_configuration_keys = getattr(msvs_generator,
        'generator_additional_non_configuration_keys', [])
    generator_additional_path_sections = getattr(msvs_generator,
        'generator_additional_path_sections', [])
    gyp.msvs_emulation.CalculateCommonVariables(default_variables, params)
def CalculateGeneratorInputInfo(params):
  """Calculate the generator specific info that gets fed to input (called by
  gyp)."""
  flags = params.get('generator_flags', {})
  if flags.get('adjust_static_libraries', False):
    global generator_wants_static_library_dependencies_adjusted
    generator_wants_static_library_dependencies_adjusted = True

  toplevel_dir = params['options'].toplevel_dir
  # Build directory name, relative to the generator directory.
  build_dir = flags.get('output_dir', 'out')
  gen_dir = os.path.relpath(params['options'].generator_output or '.')
  qualified = os.path.normpath(
      os.path.join(toplevel_dir, gen_dir, build_dir, 'gypfiles'))

  # Publish the paths gyp's input machinery expects from this generator.
  global generator_filelist_paths
  generator_filelist_paths = {
      'toplevel': toplevel_dir,
      'qualified_out_dir': qualified,
  }
def GenerateOutput(target_list, target_dicts, data, params):
  """Write the dependency graph of all targets to <output_dir>/dump.json.

  The file maps each fully-qualified target name to the list of targets it
  directly depends on.  `data` is unused but required by the generator API.
  """
  # Map of target -> list of targets it depends on.
  edges = {}

  # Queue of targets to visit; seeded with every requested target so the
  # traversal covers all of their transitive dependencies.
  targets_to_visit = target_list[:]

  while len(targets_to_visit) > 0:
    target = targets_to_visit.pop()
    if target in edges:
      continue
    edges[target] = []
    for dep in target_dicts[target].get('dependencies', []):
      edges[target].append(dep)
      targets_to_visit.append(dep)

  try:
    filepath = params['generator_flags']['output_dir']
  except KeyError:
    filepath = '.'
  filename = os.path.join(filepath, 'dump.json')
  # Context manager guarantees the file is closed even if json.dump raises.
  with open(filename, 'w') as f:
    json.dump(edges, f)
  # Single-argument print() is valid (and identical) under Python 2 and 3.
  print('Wrote json to %s.' % filename)
| bsd-3-clause | 4ec7046093339005326a345e0550da53 | 33.606061 | 77 | 0.697607 | 3.840807 | false | false | false | false |
dbcli/vcli | vcli/vexecute.py | 1 | 10588 | import logging
import socket
import sys
import sqlparse
import vertica_python as vertica
from sqlparse.tokens import Token as _Token
from sqlparse.sql import Token
from .packages import vspecial as special
from .encodingutils import PY2
_logger = logging.getLogger(__name__)
class VExecute(object):
    """Execute SQL against a Vertica database over a vertica_python connection.

    Also exposes the canned v_catalog metadata queries used by the
    auto-completer (schemas, tables, views, columns, functions, types).
    """

    # The boolean argument to the current_schemas function indicates whether
    # implicit schemas, e.g. pg_catalog
    search_path_query = '''
        SELECT current_schemas(true)'''

    schemata_query = '''
        SELECT schema_name
        FROM v_catalog.schemata
        ORDER BY 1'''

    tables_query = '''
        SELECT table_schema, table_name
        FROM v_catalog.tables
        ORDER BY 1, 2'''

    views_query = '''
        SELECT table_schema, table_name
        FROM v_catalog.views
        ORDER BY 1, 2'''

    table_columns_query = '''
        SELECT table_schema, table_name, column_name
        FROM v_catalog.columns
        ORDER BY 1, 2, 3'''

    view_columns_query = '''
        SELECT table_schema, table_name, column_name
        FROM v_catalog.view_columns
        ORDER BY 1, 2, 3'''

    functions_query = '''
        SELECT schema_name, function_name
        FROM v_catalog.user_functions
        WHERE schema_name NOT IN ('v_catalog', 'v_monitor', 'v_internal')
        ORDER BY 1, 2'''

    databases_query = '''
        SELECT database_name, owner_id, 'UTF8' AS encoding,
               'en_US.utf8' AS collate, 'en_US.utf8' AS ctype
        FROM v_catalog.databases
        ORDER BY 1'''

    datatypes_query = '''
        SELECT schema_name, type_name
        FROM v_catalog.types, v_catalog.schemata
        WHERE schema_name NOT IN ('v_catalog', 'v_monitor', 'v_internal')
        ORDER BY 1, 2'''

    def __init__(self, database, user, password, host, port):
        """Remember the connection parameters and open the first connection."""
        self.dbname = database
        self.user = user
        self.password = password
        self.host = host
        self.port = port
        self.connect()

    def connect(self, database=None, user=None, password=None, host=None,
                port=None):
        """(Re)connect; any parameter left as None falls back to the stored
        value from the previous connection (supports \\c to another db)."""
        db = (database or self.dbname)
        user = (user or self.user)
        password = (password or self.password)
        host = (host or self.host)
        port = (port or self.port)
        conn = vertica.connect(database=db, user=user, password=password,
                               host=host, port=int(port))

        # Print notice message for PROFILE (#42)
        def print_notice(message):
            print('%(Severity)s: %(Message)s' % message.values)
            hint = message.values.get('Hint')
            if hint:
                print('HINT: ' + hint)

        conn.notice_handler = print_notice

        # HACK: Modify vertica_python's connection socket to do keep alive
        # TODO: Keep alive for Windows and other platforms
        # http://stackoverflow.com/questions/12248132/how-to-change-tcp-keepalive-timer-using-python-script
        sock = conn._socket()
        if sys.platform == 'darwin':  # Mac OS X
            # macOS has no TCP_KEEPIDLE constant; 0x10 is its equivalent.
            tcp_keepalive = 0x10
            sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
            sock.setsockopt(socket.IPPROTO_TCP, tcp_keepalive, 60)
            sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPINTVL, 10)
            sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPCNT, 5)
        elif sys.platform.startswith('linux'):
            sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
            sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPIDLE, 60)
            sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPINTVL, 10)
            sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPCNT, 5)

        # Close the previous connection only after the new one succeeded.
        if hasattr(self, 'conn'):
            self.conn.close()

        self.conn = conn
        # self.conn.autocommit = True
        self.dbname = db
        self.user = user
        self.password = password
        self.host = host
        self.port = port
        # register_json_typecasters(self.conn, self._json_typecaster)
        # register_hstore_typecaster(self.conn)

    def _json_typecaster(self, json_data):
        """Interpret incoming JSON data as a string.

        The raw data is decoded using the connection's encoding, which defaults
        to the database's encoding.

        See http://initd.org/psycopg/docs/connection.html#connection.encoding
        """
        if PY2:
            return json_data.decode(self.conn.encoding)
        else:
            return json_data

    def run(self, statement, vspecial=None):
        """Execute the sql in the database and return the results.

        :param statement: A string containing one or more sql statements
        :param vspecial: VSpecial object
        :return: List of tuples containing (title, rows, headers, status,
                 force_stdout)
        """
        # Remove spaces and EOL
        statement = statement.strip()
        if not statement:  # Empty string
            yield (None, None, None, None, True)

        # Split the sql into separate queries and run each one.
        for sql in sqlparse.split(statement):
            # Remove spaces, eol and semi-colons.
            sql = sql.rstrip(';')

            if vspecial:
                # First try to run each query as special
                try:
                    _logger.debug('Trying a vspecial command. sql: %r', sql)
                    cur = self.conn.cursor()
                    for result in vspecial.execute(cur, sql):
                        yield result
                    return
                except special.CommandNotFound:
                    # Not a backslash command; fall through to plain SQL.
                    pass

            yield self.execute_normal_sql(sql)

    def execute_normal_sql(self, split_sql):
        """Run one plain SQL statement; COPY ... FROM LOCAL gets special
        handling because the file contents must be streamed from the client."""
        _logger.debug('Regular sql statement. sql: %r', split_sql)
        cur = self.conn.cursor()

        try:
            tree = sqlparse.parse(split_sql)[0]
        except IndexError:
            # Nothing parseable (e.g. whitespace only) -- nothing to run.
            return (None, None, None, None, True)

        if _is_copy_from_local(tree):
            _execute_copy_from_local_sql(tree, cur)
        else:
            cur.execute(split_sql)

        title = None
        statusmessage = None

        # Only statements of these kinds produce a result set worth rendering.
        first_token = split_sql.split()[0].lower()
        if cur.description and first_token in ('select', 'update', 'delete', 'with',
                                               'insert', 'explain', 'profile'):
            headers = [x[0] for x in cur.description]
            return (title, cur, headers, statusmessage, False)
        else:
            _logger.debug('No rows in result.')
            return (title, None, None, statusmessage, True)

    def search_path(self):
        """Returns the current search path as a list of schema names"""
        with self.conn.cursor() as cur:
            _logger.debug('Search path query. sql: %r', self.search_path_query)
            cur.execute(self.search_path_query)
            names = cur.fetchone()[0]
            return names.split(b',')

    def schemata(self):
        """Returns a list of schema names in the database"""
        with self.conn.cursor() as cur:
            _logger.debug('Schemata Query. sql: %r', self.schemata_query)
            cur.execute(self.schemata_query)
            return (x[0] for x in cur.fetchall())

    def tables(self):
        """Yields (schema_name, table_name) tuples"""
        with self.conn.cursor() as cur:
            _logger.debug('Tables Query. sql: %r', self.tables_query)
            cur.execute(self.tables_query)
            for row in cur.iterate():
                yield tuple(row)

    def views(self):
        """Yields (schema_name, view_name) tuples.

        Includes both views and and materialized views
        """
        with self.conn.cursor() as cur:
            _logger.debug('Views Query. sql: %r', self.views_query)
            cur.execute(self.views_query)
            for row in cur.iterate():
                yield tuple(row)

    def table_columns(self):
        """Yields (schema_name, table_name, column_name) tuples."""
        with self.conn.cursor() as cur:
            _logger.debug('Columns Query. sql: %r', self.table_columns_query)
            cur.execute(self.table_columns_query)
            for row in cur.iterate():
                yield tuple(row)

    def view_columns(self):
        """Yields (schema_name, view_name, column_name) tuples."""
        with self.conn.cursor() as cur:
            _logger.debug('Columns Query. sql: %r', self.view_columns_query)
            cur.execute(self.view_columns_query)
            for row in cur.iterate():
                yield tuple(row)

    def databases(self):
        """Returns a list of database names."""
        with self.conn.cursor() as cur:
            _logger.debug('Databases Query. sql: %r', self.databases_query)
            cur.execute(self.databases_query)
            return [x[0] for x in cur.fetchall()]

    def functions(self):
        """Yields tuples of (schema_name, function_name)"""
        with self.conn.cursor() as cur:
            _logger.debug('Functions Query. sql: %r', self.functions_query)
            cur.execute(self.functions_query)
            for row in cur.iterate():
                yield tuple(row)

    def datatypes(self):
        """Yields tuples of (schema_name, type_name)"""
        with self.conn.cursor() as cur:
            _logger.debug('Datatypes Query. sql: %r', self.datatypes_query)
            cur.execute(self.datatypes_query)
            for row in cur.iterate():
                yield tuple(row)
def _is_copy_from_local(sql_tree):
    """Return True when *sql_tree* is a ``COPY ... FROM LOCAL '<file>'``
    statement, i.e. a COPY whose LOCAL keyword is followed (after one
    whitespace token) by a single-quoted file path."""
    tokens = sql_tree.tokens

    head = tokens[0]
    if not (head.is_keyword and head.value.lower() == 'copy'):
        return False

    # Locate the first LOCAL keyword, if any.
    local_idx = next(
        (idx for idx, tok in enumerate(tokens)
         if tok.is_keyword and tok.value.lower() == 'local'),
        None)
    if local_idx is None:
        return False

    # After LOCAL there should be a whitespace token, then the file path.
    try:
        candidate = tokens[local_idx + 2]
    except IndexError:
        return False
    return candidate.ttype is _Token.Literal.String.Single
def _execute_copy_from_local_sql(sql_tree, cursor):
    """Run a COPY ... FROM LOCAL statement by streaming the file via stdin.

    Rewrites ``LOCAL '<file_path>'`` to ``stdin`` in the parsed statement and
    feeds the file contents through the cursor's copy protocol.  Assumes the
    tree already passed _is_copy_from_local, so a LOCAL keyword exists.
    """
    # Search for 'LOCAL' keyword
    for i, token in enumerate(sql_tree.tokens):
        if token.is_keyword and token.value.lower() == 'local':
            break

    file_path = sql_tree.tokens[i + 2].value.strip('\'"')

    # Replace "LOCAL <file_path>" with "stdin" (drops LOCAL, the whitespace
    # token, and the quoted path; splices in a single 'stdin' keyword).
    sql_tree.tokens = sql_tree.tokens[0:i] + [
        Token(_Token.Keyword, 'stdin')
    ] + sql_tree.tokens[i + 3:]
    new_sql = sql_tree.to_unicode()

    # Drain any pending protocol messages before starting the copy stream.
    cursor.flush_to_query_ready()
    with open(file_path, 'rb') as f:
        cursor.copy(new_sql, f)
    cursor.flush_to_query_ready()
| bsd-3-clause | 768f752e03cc0567855ec75d22d8bba0 | 33.376623 | 107 | 0.579146 | 3.953697 | false | false | false | false |
dbcli/vcli | vcli/vbuffer.py | 1 | 1105 | from prompt_toolkit.buffer import Buffer
from prompt_toolkit.filters import Condition
class VBuffer(Buffer):
    """Input buffer that decides when [Enter] submits the text versus
    inserting a newline, based on the multi-line heuristics in
    _multiline_exception."""

    def __init__(self, always_multiline, *args, **kwargs):
        self.always_multiline = always_multiline

        @Condition
        def is_multiline():
            # Stay in multi-line mode unless the text is something that
            # should be submitted immediately (special command, trailing
            # semicolon, exit/quit, ...).
            doc = self.document
            return self.always_multiline and not _multiline_exception(doc.text)

        # Name the class explicitly: super(self.__class__, self) recurses
        # infinitely as soon as VBuffer is subclassed.
        super(VBuffer, self).__init__(*args, is_multiline=is_multiline,
                                      tempfile_suffix='.sql', **kwargs)
def _multiline_exception(text):
text = text.strip()
return (text.startswith('\\') or # Special Command
text.endswith('\e') or # Ended with \e which should launch the editor.
text.endswith(';') or # Ended with a semi-colon
(text == 'exit') or # Exit doesn't need semi-colon
(text == 'quit') or # Quit doesn't need semi-colon
(text == ':q') or # To all the vim fans out there
(text == '') # Just a plain enter without any text
)
| bsd-3-clause | 228ad19e5b3ace93cd16630989a34a2c | 43.2 | 86 | 0.555656 | 4.384921 | false | false | false | false |
dbcli/vcli | vcli/packages/sqlcompletion.py | 1 | 13865 | from __future__ import print_function
import sys
import sqlparse
from sqlparse.sql import Comparison, Identifier, Where
from .parseutils import last_word, extract_tables, find_prev_keyword
from .vspecial import parse_special_command
# Minimal Python 2/3 compatibility shim (avoids a dependency on `six`).
PY2 = sys.version_info[0] == 2
PY3 = sys.version_info[0] == 3

if PY3:
    string_types = str
else:
    # On Python 2 this covers both str and unicode.
    string_types = basestring
def suggest_type(full_text, text_before_cursor):
    """Takes the full_text that is typed so far and also the text before the
    cursor to suggest completion type and scope.

    Returns a list of suggestion dicts, each with a type of entity ('table',
    'column' etc) and a scope.  A scope for a column category will be a list
    of tables.
    """
    word_before_cursor = last_word(text_before_cursor,
                                   include='many_punctuations')

    identifier = None

    # If we've partially typed a word then word_before_cursor won't be an empty
    # string. In that case we want to remove the partially typed string before
    # sending it to the sqlparser. Otherwise the last token will always be the
    # partially typed string which renders the smart completion useless because
    # it will always return the list of keywords as completion.
    if word_before_cursor:
        if word_before_cursor[-1] == '(' or word_before_cursor[0] == '\\':
            # A function call or a backslash command: parse everything.
            parsed = sqlparse.parse(text_before_cursor)
        else:
            parsed = sqlparse.parse(
                text_before_cursor[:-len(word_before_cursor)])

            # word_before_cursor may include a schema qualification, like
            # "schema_name.partial_name" or "schema_name.", so parse it
            # separately
            p = sqlparse.parse(word_before_cursor)[0]

            if p.tokens and isinstance(p.tokens[0], Identifier):
                identifier = p.tokens[0]
    else:
        parsed = sqlparse.parse(text_before_cursor)

    if len(parsed) > 1:
        # Multiple statements being edited -- isolate the current one by
        # cumulatively summing statement lengths to find the one that bounds the
        # current position
        current_pos = len(text_before_cursor)
        stmt_start, stmt_end = 0, 0

        for statement in parsed:
            stmt_len = len(statement.to_unicode())
            stmt_start, stmt_end = stmt_end, stmt_end + stmt_len

            if stmt_end >= current_pos:
                text_before_cursor = full_text[stmt_start:current_pos]
                full_text = full_text[stmt_start:]
                break

    elif parsed:
        # A single statement
        statement = parsed[0]
    else:
        # The empty string
        statement = None

    # Check for special commands and handle those separately
    if statement:
        # Be careful here because trivial whitespace is parsed as a statement,
        # but the statement won't have a first token
        tok1 = statement.token_first()
        if tok1 and tok1.value == '\\':
            return suggest_special(text_before_cursor)

    last_token = statement and statement.token_prev(len(statement.tokens)) or ''

    return suggest_based_on_last_token(last_token, text_before_cursor,
                                       full_text, identifier)
def suggest_special(text):
    """Return completion suggestions for a backslash (special) command.

    *text* is the text before the cursor, starting with the backslash.
    """
    text = text.lstrip()
    cmd, _, arg = parse_special_command(text)

    if cmd == text:
        # Trying to complete the special command itself
        return [{'type': 'special'}]

    if cmd in ('\\c', '\\connect'):
        return [{'type': 'database'}]

    if cmd == '\\dn':
        return [{'type': 'schema'}]

    if arg:
        # Try to distinguish "\d name" from "\d schema.name"
        # Note that this will fail to obtain a schema name if wildcards are
        # used, e.g. "\d schema???.name"
        parsed = sqlparse.parse(arg)[0].tokens[0]
        try:
            schema = parsed.get_parent_name()
        except AttributeError:
            schema = None
    else:
        schema = None

    if cmd[1:] == 'd':
        # \d can describe tables or views
        if schema:
            return [{'type': 'table', 'schema': schema},
                    {'type': 'view', 'schema': schema}]
        else:
            return [{'type': 'schema'},
                    {'type': 'table', 'schema': []},
                    {'type': 'view', 'schema': []}]
    elif cmd[1:] in ('dt', 'dv', 'df', 'dT'):
        rel_type = {'dt': 'table',
                    'dv': 'view',
                    'df': 'function',
                    'dT': 'datatype',
                    }[cmd[1:]]
        if schema:
            return [{'type': rel_type, 'schema': schema}]
        else:
            return [{'type': 'schema'},
                    {'type': rel_type, 'schema': []}]

    if cmd in ['\\n', '\\ns', '\\nd']:
        return [{'type': 'namedquery'}]

    # Unknown special command: fall back to generic suggestions.
    return [{'type': 'keyword'}, {'type': 'special'}]
def suggest_based_on_last_token(token, text_before_cursor, full_text, identifier):
    """Map the last significant parsed token to a list of suggestion dicts.

    :param token: last sqlparse token before the cursor (or a plain keyword
                  string when this function recurses on itself)
    :param text_before_cursor: text up to the cursor position
    :param full_text: the full statement being edited
    :param identifier: partially-typed Identifier under the cursor, if any
    :return: list of suggestion dicts such as [{'type': 'column', ...}]
    """
    if isinstance(token, string_types):
        token_v = token.lower()
    elif isinstance(token, Comparison):
        # If 'token' is a Comparison type such as
        # 'select * FROM abc a JOIN def d ON a.id = d.'. Then calling
        # token.value on the comparison type will only return the lhs of the
        # comparison. In this case a.id. So we need to do token.tokens to get
        # both sides of the comparison and pick the last token out of that
        # list.
        token_v = token.tokens[-1].value.lower()
    elif isinstance(token, Where):
        # sqlparse groups all tokens from the where clause into a single token
        # list. This means that token.value may be something like
        # 'where foo > 5 and '. We need to look "inside" token.tokens to handle
        # suggestions in complicated where clauses correctly
        prev_keyword, text_before_cursor = find_prev_keyword(text_before_cursor)
        return suggest_based_on_last_token(prev_keyword, text_before_cursor,
                                           full_text, identifier)
    elif isinstance(token, Identifier):
        # If the previous token is an identifier, we can suggest datatypes if
        # we're in a parenthesized column/field list, e.g.:
        #       CREATE TABLE foo (Identifier <CURSOR>
        #       CREATE FUNCTION foo (Identifier <CURSOR>
        # If we're not in a parenthesized list, the most likely scenario is the
        # user is about to specify an alias, e.g.:
        #       SELECT Identifier <CURSOR>
        #       SELECT foo FROM Identifier <CURSOR>
        prev_keyword, _ = find_prev_keyword(text_before_cursor)
        if prev_keyword and prev_keyword.value == '(':
            # Suggest datatypes
            return suggest_based_on_last_token('type', text_before_cursor,
                                               full_text, identifier)
        else:
            return [{'type': 'keyword'}]
    else:
        token_v = token.value.lower()

    if not token:
        return [{'type': 'keyword'}, {'type': 'special'}]
    elif token_v.endswith('('):
        p = sqlparse.parse(text_before_cursor)[0]

        if p.tokens and isinstance(p.tokens[-1], Where):
            # Four possibilities:
            #  1 - Parenthesized clause like "WHERE foo AND ("
            #        Suggest columns/functions
            #  2 - Function call like "WHERE foo("
            #        Suggest columns/functions
            #  3 - Subquery expression like "WHERE EXISTS ("
            #        Suggest keywords, in order to do a subquery
            #  4 - Subquery OR array comparison like "WHERE foo = ANY("
            #        Suggest columns/functions AND keywords. (If we wanted to be
            #        really fancy, we could suggest only array-typed columns)
            column_suggestions = suggest_based_on_last_token('where',
                text_before_cursor, full_text, identifier)

            # Check for a subquery expression (cases 3 & 4)
            where = p.tokens[-1]
            prev_tok = where.token_prev(len(where.tokens) - 1)

            if isinstance(prev_tok, Comparison):
                # e.g. "SELECT foo FROM bar WHERE foo = ANY("
                prev_tok = prev_tok.tokens[-1]

            prev_tok = prev_tok.value.lower()
            if prev_tok == 'exists':
                return [{'type': 'keyword'}]
            elif prev_tok in ('any', 'some', 'all'):
                return column_suggestions + [{'type': 'keyword'}]
            elif prev_tok == 'in':
                # Technically, we should suggest columns AND keywords, as
                # per case 4. However, IN is different from ANY, SOME, ALL
                # in that it can accept a *list* of columns, or a subquery.
                # But suggesting keywords for , "SELECT * FROM foo WHERE bar IN
                # (baz, qux, " would be overwhelming. So we special case 'IN'
                # to not suggest keywords.
                return column_suggestions
            else:
                return column_suggestions

        # Get the token before the parens
        prev_tok = p.token_prev(len(p.tokens) - 1)
        if prev_tok and prev_tok.value and prev_tok.value.lower() == 'using':
            # tbl1 INNER JOIN tbl2 USING (col1, col2)
            tables = extract_tables(full_text)

            # suggest columns that are present in more than one table
            return [{'type': 'column', 'tables': tables, 'drop_unique': True}]
        elif p.token_first().value.lower() == 'select':
            # If the lparen is preceeded by a space chances are we're about to
            # do a sub-select.
            if last_word(text_before_cursor,
                         'all_punctuations').startswith('('):
                return [{'type': 'keyword'}]

        # We're probably in a function argument list
        return [{'type': 'column', 'tables': extract_tables(full_text)}]
    elif token_v in ('set', 'by', 'distinct'):
        return [{'type': 'column', 'tables': extract_tables(full_text)}]
    elif token_v in ('select', 'where', 'having'):
        # Check for a table alias or schema qualification
        parent = (identifier and identifier.get_parent_name()) or []

        if parent:
            tables = extract_tables(full_text)
            tables = [t for t in tables if identifies(parent, *t)]
            return [{'type': 'column', 'tables': tables},
                    {'type': 'table', 'schema': parent},
                    {'type': 'view', 'schema': parent},
                    {'type': 'function', 'schema': parent}]
        else:
            return [{'type': 'column', 'tables': extract_tables(full_text)},
                    {'type': 'function', 'schema': []}]
    elif (token_v.endswith('join') and token.is_keyword) or (token_v in
            ('copy', 'from', 'update', 'into', 'describe', 'truncate')):
        schema = (identifier and identifier.get_parent_name()) or []

        # Suggest tables from either the currently-selected schema or the
        # public schema if no schema has been specified
        suggest = [{'type': 'table', 'schema': schema}]

        if not schema:
            # Suggest schemas
            suggest.insert(0, {'type': 'schema'})

        # Only tables can be TRUNCATED, otherwise suggest views
        if token_v != 'truncate':
            suggest.append({'type': 'view', 'schema': schema})

        return suggest
    elif token_v in ('table', 'view', 'function'):
        # E.g. 'DROP FUNCTION <funcname>', 'ALTER TABLE <tablname>'
        rel_type = token_v
        schema = (identifier and identifier.get_parent_name()) or []
        if schema:
            return [{'type': rel_type, 'schema': schema}]
        else:
            return [{'type': 'schema'}, {'type': rel_type, 'schema': []}]
    elif token_v == 'on':
        tables = extract_tables(full_text)  # [(schema, table, alias), ...]
        parent = (identifier and identifier.get_parent_name()) or []
        if parent:
            # "ON parent.<suggestion>"
            # parent can be either a schema name or table alias
            tables = [t for t in tables if identifies(parent, *t)]
            return [{'type': 'column', 'tables': tables},
                    {'type': 'table', 'schema': parent},
                    {'type': 'view', 'schema': parent},
                    {'type': 'function', 'schema': parent}]
        else:
            # ON <suggestion>
            # Use table alias if there is one, otherwise the table name
            aliases = [t[2] or t[1] for t in tables]
            return [{'type': 'alias', 'aliases': aliases}]
    elif token_v in ('c', 'use', 'database', 'template'):
        # "\c <db", "use <db>", "DROP DATABASE <db>",
        # "CREATE DATABASE <newdb> WITH TEMPLATE <db>"
        return [{'type': 'database'}]
    elif token_v == 'schema':
        # DROP SCHEMA schema_name
        return [{'type': 'schema'}]
    elif token_v.endswith(',') or token_v == '=':
        prev_keyword, text_before_cursor = find_prev_keyword(text_before_cursor)
        if prev_keyword:
            return suggest_based_on_last_token(
                prev_keyword, text_before_cursor, full_text, identifier)
        else:
            return []
    elif token_v in ('type', '::'):
        # ALTER TABLE foo SET DATA TYPE bar
        # SELECT foo::bar
        # Note that tables are a form of composite type in postgresql, so
        # they're suggested here as well
        schema = (identifier and identifier.get_parent_name()) or []
        suggestions = [{'type': 'datatype', 'schema': schema},
                       {'type': 'table', 'schema': schema}]
        if not schema:
            suggestions.append({'type': 'schema'})
        return suggestions
    else:
        return [{'type': 'keyword'}]
def identifies(id, schema, table, alias):
    """Return a truthy value when *id* refers to the given table.

    *id* matches if it equals the table's alias, its bare name, or its
    schema-qualified name.  (Falls through to a falsy ``schema``/``None``
    value when nothing matches, mirroring the original or-chain.)
    """
    if id == alias or id == table:
        return True
    return schema and id == schema + '.' + table
| bsd-3-clause | 85173c5b3dd5594f72fa00e697aa4e28 | 41.015152 | 82 | 0.560765 | 4.185029 | false | false | false | false |
pecan/pecan | pecan/tests/middleware/test_static.py | 3 | 2390 | from pecan.middleware.static import (StaticFileMiddleware, FileWrapper,
_dump_date)
from pecan.tests import PecanTestCase
import os
class TestStaticFileMiddleware(PecanTestCase):
    """Tests for StaticFileMiddleware: file lookup, pass-through, MIME types,
    and the date-formatting helper."""

    def setUp(self):
        super(TestStaticFileMiddleware, self).setUp()

        # Fallback WSGI app used when the middleware finds no static file.
        def app(environ, start_response):
            response_headers = [('Content-type', 'text/plain')]
            start_response('200 OK', response_headers)
            return ['Hello world!\n']

        # Serve static fixtures from the directory containing this test file.
        self.app = StaticFileMiddleware(
            app, os.path.dirname(__file__)
        )

        self._status = None
        self._response_headers = None

    def _request(self, path):
        """Issue a minimal WSGI request, capturing status and headers."""
        def start_response(status, response_headers, exc_info=None):
            self._status = status
            self._response_headers = response_headers
        return self.app(
            dict(PATH_INFO=path),
            start_response
        )

    def _get_response_header(self, header):
        # Case-insensitive header lookup over the captured response headers.
        for k, v in self._response_headers:
            if k.upper() == header.upper():
                return v
        return None

    def test_file_can_be_found(self):
        result = self._request('/static_fixtures/text.txt')
        assert isinstance(result, FileWrapper)
        result.close()

    def test_no_file_found_causes_passthrough(self):
        # Missing files must fall through to the wrapped application.
        result = self._request('/static_fixtures/nosuchfile.txt')
        assert not isinstance(result, FileWrapper)
        assert result == ['Hello world!\n']

    def test_mime_type_works_for_png_files(self):
        result = self._request('/static_fixtures/self.png')
        assert self._get_response_header('Content-Type') == 'image/png'
        result.close()

    def test_file_can_be_closed(self):
        result = self._request('/static_fixtures/text.txt')
        assert result.close() is None

    def test_file_can_be_iterated_over(self):
        result = self._request('/static_fixtures/text.txt')
        assert len([x for x in result])
        result.close()

    def test_date_dumping_on_unix_timestamps(self):
        result = _dump_date(1331755274.59, ' ')
        assert result == 'Wed, 14 Mar 2012 20:01:14 GMT'

    def test_separator_sanitization_still_finds_file(self):
        # NOTE(review): mutating os.altsep leaks into other tests in this
        # process; confirm whether it should be restored in tearDown.
        os.altsep = ':'
        result = self._request(':static_fixtures:text.txt')
        assert isinstance(result, FileWrapper)
        result.close()
| bsd-3-clause | ba4bbd62224c13f3bc5f060833a3e8d6 | 32.194444 | 71 | 0.606695 | 3.956954 | false | true | false | false |
tulip-control/tulip-control | examples/pwa.py | 1 | 3986 | #!/usr/bin/env python
"""Controller synthesis for system with piecewise-affine continuous dynamics."""
# This example is an extension of `robot_continuous.py`
# by Petter Nilsson and Nok Wongpiromsarn.
# Necmiye Ozay, August 26, 2012
from __future__ import print_function
import numpy as np
from polytope import box2poly
from tulip.abstract import discretize
from tulip.abstract import prop2part
from tulip.abstract.plot import plot_strategy
from tulip.hybrid import LtiSysDyn
from tulip.hybrid import PwaSysDyn
from tulip import spec
from tulip import synth
# set to `True` if `matplotlib.pyplot` is available
plotting = False

# Problem parameters
input_bound = 0.4   # scales the control-input polytope U below
uncertainty = 0.05  # scales the disturbance polytope W below

# Continuous state space: the robot's xy position on a 3 x 2 surface
cont_state_space = box2poly([[0., 3.], [0., 2.]])
# Assume, for instance, our robot is traveling on
# a nonhomogenous surface (xy plane),
# resulting in different dynamics at different
# parts of the plane.
#
# Since the continuous state space in this example
# is just xy position, different dynamics in
# different parts of the surface can be modeled
# using LtiSysDyn subsystems subsys0 and subsys1.
#
# Together they comprise a Piecewise Affine System:
# @subsystem0@
def subsys0():
    """Build the LTI dynamics valid on the upper region [0, 3] x [0.5, 2].

    Returns an ``LtiSysDyn`` with input set ``U`` (unit box scaled by
    ``input_bound``) and disturbance set ``W`` (unit box scaled by
    ``uncertainty``).
    """
    dynamics = np.array([[1.105_2, 0.], [0., 1.105_2]])
    input_gain = np.array([[1.105_2, 0.], [0., 1.105_2]])
    disturbance_gain = np.array([[1, 0], [0, 1]])
    # Admissible control inputs: scaled unit box.
    input_set = box2poly([[-1., 1.], [-1., 1.]])
    input_set.scale(input_bound)
    # Admissible disturbances: scaled unit box.
    disturbance_set = box2poly([[-1., 1.], [-1., 1.]])
    disturbance_set.scale(uncertainty)
    domain = box2poly([[0., 3.], [0.5, 2.]])
    return LtiSysDyn(
        dynamics, input_gain, disturbance_gain,
        None, input_set, disturbance_set, domain)
# @subsystem0_end@
# @subsystem1@
def subsys1():
    """Build the LTI dynamics valid on the lower region [0, 3] x [0, 0.5].

    Returns an ``LtiSysDyn`` with input set ``U`` (unit box scaled by
    ``input_bound``) and disturbance set ``W`` (unit box scaled by
    ``uncertainty``).  Note the negated x-input gain compared with
    ``subsys0``: the two subsystems model different surface dynamics.
    """
    dynamics = np.array([[0.994_8, 0.], [0., 1.105_2]])
    input_gain = np.array([[-1.105_2, 0.], [0., 1.105_2]])
    disturbance_gain = np.array([[1, 0], [0, 1]])
    # Admissible control inputs: scaled unit box.
    input_set = box2poly([[-1., 1.], [-1., 1.]])
    input_set.scale(input_bound)
    # Admissible disturbances: scaled unit box.
    disturbance_set = box2poly([[-1., 1.], [-1., 1.]])
    disturbance_set.scale(uncertainty)
    domain = box2poly([[0., 3.], [0., 0.5]])
    return LtiSysDyn(
        dynamics, input_gain, disturbance_gain,
        None, input_set, disturbance_set, domain)
# @subsystem1_end@
# @pwasystem@
subsystems = [subsys0(), subsys1()]
# Build piecewise affine system from its subsystems
sys_dyn = PwaSysDyn(subsystems, cont_state_space)
if plotting:
    ax = sys_dyn.plot()
    ax.figure.savefig('pwa_sys_dyn.pdf')
# @pwasystem_end@
# Continuous proposition
# (polytopic regions labeled with the atomic propositions 'home' and 'lot')
cont_props = {}
cont_props['home'] = box2poly([[0., 1.], [0., 1.]])
cont_props['lot'] = box2poly([[2., 3.], [1., 2.]])
# Compute the proposition preserving partition
# of the continuous state space
cont_partition = prop2part(cont_state_space, cont_props)
if plotting:
    ax = cont_partition.plot()
    cont_partition.plot_props(ax=ax)
    ax.figure.savefig('spec_ppp.pdf')
# Abstract the PWA dynamics into a finite transition system
# (closed_loop=True uses closed-loop reachability over horizon N=8).
disc_dynamics = discretize(
    cont_partition, sys_dyn, closed_loop=True,
    N=8, min_cell_volume=0.1, plotit=plotting, save_img=True,
    cont_props=cont_props)
if plotting:
    ax = disc_dynamics.plot(show_ts=True)
    ax.figure.savefig('abs_pwa.pdf')
    disc_dynamics.ts.save('ts.pdf')
# Specifications
# Environment variables and assumptions
env_vars = {'park'}
env_init = set()  # empty set
env_prog = '!park'  # infinitely often, park is not requested
env_safe = set()  # empty set
# System variables and requirements
sys_vars = {'X0reach'}
# []<>home
sys_prog = {'home'}
# [](park -> <> lot)
# X0reach is an auxiliary variable tracking the pending obligation to
# visit 'lot' after a 'park' request.
sys_init = {'X0reach'}
sys_safe = {'X(X0reach) <-> lot || (X0reach && !park)'}
sys_prog |= {'X0reach'}
# Create the specification
specs = spec.GRSpec(env_vars, sys_vars, env_init, sys_init,
                    env_safe, sys_safe, env_prog, sys_prog)
specs.moore = True
specs.qinit = r'\E \A'
# Synthesize
ctrl = synth.synthesize(specs,
                        sys=disc_dynamics.ts, ignore_sys_init=True)
assert ctrl is not None, 'unrealizable'
if plotting:
    ax = plot_strategy(disc_dynamics, ctrl)
    ax.figure.savefig('pwa_proj_mealy.pdf')
# Save graphical representation of controller for viewing
if not ctrl.save('pwa.png'):
    print(ctrl)
# Simulation
| bsd-3-clause | a193d5b2bec2554f1db966299e1c03e1 | 25.573333 | 80 | 0.646764 | 2.871758 | false | false | false | false |
tulip-control/tulip-control | tulip/spec/prioritized_safety.py | 1 | 5753 | # Copyright (c) 2020 by California Institute of Technology
# and University of Texas at Austin
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder(s) nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL CALTECH
# OR THE CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
# USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
# OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
from itertools import product
from tulip.transys.automata import FiniteStateAutomaton as FA
class FAWithPriority(object):
    """A rule, encoded as a nondeterministic finite automaton, together
    with an integer priority and a level in the rule hierarchy."""

    def __init__(self, fa, priority, level):
        # Validate argument types up front.
        assert isinstance(fa, FA)
        assert isinstance(priority, int)
        assert isinstance(level, int)
        self._fa, self._priority, self._level = fa, priority, level

    def automaton(self):
        """Get the automaton of this rule"""
        return self._fa

    def level(self):
        """Get the level of this rule"""
        return self._level

    def priority(self):
        """Get the priority of this rule"""
        return self._priority
class PrioritizedSpecification(object):
    """A class for defining a prioritized safety specification.

    Rules are stored in ``self._Psi``, a list of lists: ``_Psi[level]``
    holds the ``FAWithPriority`` rules at that hierarchy level.
    Indexing and iteration flatten this structure, visiting rules in
    level order, then insertion order within a level.
    """

    def __init__(self):
        self._Psi = []  # _Psi[level] is the list of rules at that level
        self.atomic_propositions = []  # shared by all rule automata

    def __getitem__(self, key):
        """Return the ``key``-th rule in the flattened level order."""
        assert key >= 0
        # Walk the levels, subtracting each level's size from ``key``
        # until ``key`` indexes inside the current level.
        level = 0
        while level < len(self._Psi) and key >= len(self._Psi[level]):
            key -= len(self._Psi[level])
            level += 1
        if level < len(self._Psi) and key < len(self._Psi[level]):
            return self._Psi[level][key]
        raise IndexError("index out of range")

    def __iter__(self):
        # NOTE(review): iteration state lives on the instance, so the
        # iterator is not reentrant — two concurrent iterations over the
        # same object would interfere. Confirm callers never nest loops
        # over the same specification.
        self._iter_level = 0
        self._iter_index = 0
        return self

    def __next__(self):
        # Skip empty levels until a rule is found at the cursor position.
        while self._iter_level < len(self._Psi) and self._iter_index >= len(
            self._Psi[self._iter_level]
        ):
            self._iter_index = 0
            self._iter_level += 1
        if self._iter_level >= len(self._Psi) or self._iter_index >= len(
            self._Psi[self._iter_level]
        ):
            raise StopIteration
        result = self._Psi[self._iter_level][self._iter_index]
        self._iter_index += 1
        return result

    def next(self):
        # Python 2 iterator-protocol alias.
        return self.__next__()

    def __len__(self):
        """Return the total number of rules across all levels."""
        return sum([len(psi) for psi in self._Psi])

    def add_rule(self, fa, priority, level):
        """Add rule with automaton fa, priority and level to the specification

        @param fa: FiniteStateAutomaton representing the correctness of the rule
        @param priority: float or int representing the priority of the rule
            (must be positive)
        @param level: int representing the level of the rule in the hierarchy
            (must be non-negative; intermediate empty levels are created)
        """
        assert isinstance(fa, FA)
        assert isinstance(priority, float) or isinstance(priority, int)
        assert isinstance(level, int)
        assert priority > 0
        assert level >= 0
        # Check the consistency of atomic propositions
        # (the first added rule fixes the alphabet for all later rules)
        if len(self._Psi) == 0:
            self.atomic_propositions = fa.atomic_propositions
        else:
            assert self.atomic_propositions == fa.atomic_propositions
        # Add the rule, growing the level list as needed
        rule = FAWithPriority(fa, priority, level)
        for l in range(len(self._Psi), level + 1):
            self._Psi.append([])
        self._Psi[level].append(rule)

    def get_rules_at(self, level):
        """Return the list of rules at the given level

        @rtype a list of FAWithPriority
        """
        if level >= len(self._Psi):
            return []
        return self._Psi[level]

    def get_rules(self):
        """Return the list of all the rules, flattened in level order

        @rtype a list of FAWithPriority
        """
        return [phi for psi in self._Psi for phi in psi]

    def get_states(self):
        """Get the product of the states in all the finite automata
        """
        return product(*[phi.automaton().states for phi in self])

    def get_initial_states(self):
        """Get the product of the initial states of all the finite automata
        """
        return product(*[phi.automaton().states.initial for phi in self])

    def get_accepting_states(self):
        """Get the product of the accepting states of all the finite automata
        """
        return product(*[phi.automaton().states.accepting for phi in self])

    def get_num_levels(self):
        """Get the number of levels
        """
        return len(self._Psi)
| bsd-3-clause | 7815dd2e23f69e59564a48fd0d9cf50d | 33.449102 | 87 | 0.641057 | 4.196207 | false | false | false | false |
tulip-control/tulip-control | tulip/spec/parser.py | 1 | 2869 | # Copyright (c) 2011-2014 by California Institute of Technology
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the California Institute of Technology nor
# the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL CALTECH
# OR THE CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
# USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
# OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
#
"""LTL parser supporting JTLV, SPIN, SMV, and gr1c syntax"""
from __future__ import absolute_import
from __future__ import print_function
import re
from tulip.spec import ast, lexyacc
__all__ = ['parse']
# cache of instantiated parsers: building the PLY lexer/parser tables is
# expensive, so a single instance is created lazily and reused.
parsers = dict()


def parse(formula, full_operators=False):
    """Parse formula string and create abstract syntax tree (AST).

    @param formula: LTL formula to parse.
    @type formula: C{str}
    @param full_operators: replace full names of operators
        with their symbols (case insensitive,
        each operator must be a separate word).
    @type full_operators: C{bool}

    @return: abstract syntax tree of C{formula}.
    @raise ValueError: if PLY fails to produce a parse tree.
    """
    if full_operators:
        formula = _replace_full_name_operators(formula)
    if parsers.get('ply') is None:
        parsers['ply'] = lexyacc.Parser()
    spec = parsers['ply'].parse(formula)
    # did ply fail merely printing warnings ?
    # Raise ValueError (a subclass of Exception, so existing broad
    # handlers still catch it) instead of a bare Exception.
    if spec is None:
        raise ValueError('Parsing formula:\n{f}\nfailed'.format(f=formula))
    return spec
def _replace_full_name_operators(formula):
    """Replace full names with symbols for temporal and Boolean operators.

    Each operator must be a word (as defined by \\b in regexp).
    Substitution is case insensitive.

    @type formula: C{str}
    @rtype: C{str}
    """
    for name, symbol in ast.FULL_OPERATOR_NAMES.items():
        # re.escape guards against operator names containing regex
        # metacharacters; ordinary word names are unaffected.
        formula = re.sub(
            r'(?i)\b' + re.escape(name) + r'\b', symbol, formula)
    return formula
| bsd-3-clause | 8d8305fb21ad084b80eac557302a18ca | 37.77027 | 74 | 0.730917 | 4.188321 | false | false | false | false |
poldracklab/mriqc | mriqc/classifier/sklearn/_validation.py | 1 | 8919 | # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
#
# Copyright 2021 The NiPreps Developers <nipreps@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# We support and encourage derived works from this project, please read
# about our expectations at
#
# https://www.nipreps.org/community/licensing/
#
# STATEMENT OF CHANGES: This file is derived from the sources of scikit-learn 0.19,
# which licensed under the BSD 3-clause.
# This file contains extensions and modifications to the original code.
import logging
import numbers
import time
import warnings
import numpy as np
from joblib import Parallel, delayed, logger
from sklearn.base import clone, is_classifier
from sklearn.exceptions import FitFailedWarning
from sklearn.metrics import check_scoring
from sklearn.model_selection._split import check_cv
from sklearn.utils import check_random_state, indexable, _safe_indexing as safe_indexing
from sklearn.utils.metaestimators import _safe_split
from sklearn.utils.validation import _num_samples
# import scipy.sparse as sp
# from sklearn.preprocessing import LabelEncoder
LOG = logging.getLogger("mriqc.classifier")
def cross_val_score(
    estimator,
    X,
    y=None,
    groups=None,
    scoring=None,
    cv=None,
    n_jobs=1,
    verbose=0,
    fit_params=None,
    pre_dispatch="2*n_jobs",
):
    """
    Evaluate a score by cross-validation.

    Unlike scikit-learn's function of the same name, ``scoring`` may be a
    list/tuple of scorings; a single value is wrapped into a one-element
    list, and one score per scoring is computed on every fold.

    Returns a tuple ``(scores, group_order)`` where ``scores`` is the
    squeezed array of per-fold (and per-scorer) scores, and
    ``group_order`` lists the first group label of each test fold when
    the CV splitter exposes a ``groups`` attribute (empty list otherwise).
    """
    if not isinstance(scoring, (list, tuple)):
        scoring = [scoring]
    X, y, groups = indexable(X, y, groups)
    cv = check_cv(cv, y, classifier=is_classifier(estimator))
    # Materialize the splits so they can be reused below for group_order.
    splits = list(cv.split(X, y, groups))
    scorer = [check_scoring(estimator, scoring=s) for s in scoring]
    # We clone the estimator to make sure that all the folds are
    # independent, and that it is pickle-able.
    parallel = Parallel(n_jobs=n_jobs, verbose=verbose, pre_dispatch=pre_dispatch)
    scores = parallel(
        delayed(_fit_and_score)(
            clone(estimator), X, y, scorer, train, test, verbose, None, fit_params
        )
        for train, test in splits
    )
    group_order = []
    if hasattr(cv, "groups"):
        # assumes each test fold contains a single group — TODO confirm
        # against the custom CV splitters used by the callers.
        group_order = [np.array(cv.groups)[test].tolist()[0] for _, test in splits]
    return np.squeeze(np.array(scores)), group_order
def _fit_and_score(
    estimator,
    X,
    y,
    scorer,
    train,
    test,
    verbose,
    parameters,
    fit_params,
    return_train_score=False,
    return_parameters=False,
    return_n_test_samples=False,
    return_times=False,
    error_score="raise",
):
    """
    Fit estimator and compute scores for a given dataset split.

    ``scorer`` is a *list* of scorer callables (``cross_val_score`` above
    passes one per scoring), so ``test_score`` / ``train_score`` are
    lists with one entry per scorer on the success path.

    Returns a list assembled from, in order: ``train_score`` (if
    requested), ``test_score``, number of test samples, fit/score times
    and ``parameters`` — each controlled by the corresponding
    ``return_*`` flag.
    """
    if verbose > 1:
        if parameters is None:
            msg = ""
        else:
            msg = "%s" % (", ".join("%s=%s" % (k, v) for k, v in parameters.items()))
        LOG.info("[CV] %s %s", msg, (64 - len(msg)) * ".")
    # Adjust length of sample weights
    # (array-like fit parameters are subset to the training indices)
    fit_params = fit_params if fit_params is not None else {}
    fit_params = dict(
        [(k, _index_param_value(X, v, train)) for k, v in fit_params.items()]
    )
    if parameters is not None:
        estimator.set_params(**parameters)
    start_time = time.time()
    X_train, y_train = _safe_split(estimator, X, y, train)
    X_test, y_test = _safe_split(estimator, X, y, test, train)
    try:
        if y_train is None:
            estimator.fit(X_train, **fit_params)
        else:
            estimator.fit(X_train, y_train, **fit_params)
    except Exception as e:
        # Note fit time as time until error
        fit_time = time.time() - start_time
        score_time = 0.0
        if error_score == "raise":
            raise
        elif isinstance(error_score, numbers.Number):
            # NOTE(review): on this path test_score is a scalar, not a
            # list as on the success path — kept as-is to match the
            # upstream scikit-learn behavior this code derives from.
            test_score = error_score
            if return_train_score:
                train_score = error_score
            warnings.warn(
                "Classifier fit failed. The score on this train-test"
                " partition for these parameters will be set to %f. "
                "Details: \n%r" % (error_score, e),
                FitFailedWarning,
            )
        else:
            raise ValueError(
                "error_score must be the string 'raise' or a"
                " numeric value. (Hint: if using 'raise', please"
                " make sure that it has been spelled correctly.)"
            )
    else:
        fit_time = time.time() - start_time
        test_score = [_score(estimator, X_test, y_test, s) for s in scorer]
        score_time = time.time() - start_time - fit_time
        if return_train_score:
            train_score = [_score(estimator, X_train, y_train, s) for s in scorer]
    if verbose > 2:
        # msg is guaranteed to exist here: verbose > 2 implies verbose > 1.
        msg += ", score=".join(("%f" % ts for ts in test_score))
    if verbose > 1:
        total_time = score_time + fit_time
        end_msg = "%s, total=%s" % (msg, logger.short_format_time(total_time))
        LOG.info("[CV] %s %s", (64 - len(end_msg)) * ".", end_msg)
    ret = [train_score, test_score] if return_train_score else [test_score]
    if return_n_test_samples:
        ret.append(_num_samples(X_test))
    if return_times:
        ret.extend([fit_time, score_time])
    if return_parameters:
        ret.append(parameters)
    return ret
def _score(estimator, X_test, y_test, scorer):
"""Compute the score of an estimator on a given test set."""
if y_test is None:
score = scorer(estimator, X_test)
else:
score = scorer(estimator, X_test, y_test)
if hasattr(score, "item"):
try:
# e.g. unwrap memmapped scalars
score = score.item()
except ValueError:
# non-scalar?
pass
if not isinstance(score, numbers.Number):
raise ValueError(
"scoring must return a number, got %s (%s) instead."
% (str(score), type(score))
)
return score
def permutation_test_score(
    estimator,
    X,
    y,
    groups=None,
    cv=None,
    n_permutations=100,
    n_jobs=1,
    random_state=0,
    verbose=0,
    scoring=None,
):
    """
    Evaluate the significance of a cross-validated score with permutations,
    as in test 1 of [Ojala2010]_.

    A modification of original sklearn's permutation test score function
    to evaluate p-value outside this function, so that the score can be
    reused from outside.

    Unlike scikit-learn's version, this returns only the array of
    ``n_permutations`` permuted scores; the reference score and p-value
    are left to the caller.

    .. [Ojala2010] Ojala and Garriga. Permutation Tests for Studying Classifier
                   Performance. The Journal of Machine Learning Research (2010)
                   vol. 11
    """
    X, y, groups = indexable(X, y, groups)
    cv = check_cv(cv, y, classifier=is_classifier(estimator))
    scorer = check_scoring(estimator, scoring=scoring)
    random_state = check_random_state(random_state)
    # We clone the estimator to make sure that all the folds are
    # independent, and that it is pickle-able.
    # Each permutation shuffles y (within groups, if given) and re-runs
    # the full cross-validation.
    permutation_scores = Parallel(n_jobs=n_jobs, verbose=verbose)(
        delayed(_permutation_test_score)(
            clone(estimator), X, _shuffle(y, groups, random_state), groups, cv, scorer
        )
        for _ in range(n_permutations)
    )
    permutation_scores = np.array(permutation_scores)
    return permutation_scores
def _permutation_test_score(estimator, X, y, groups, cv, scorer):
    """Fit and score the estimator on each CV split; return the mean score.

    Auxiliary function for ``permutation_test_score``.
    """
    fold_scores = []
    for train_idx, test_idx in cv.split(X, y, groups):
        X_tr, y_tr = _safe_split(estimator, X, y, train_idx)
        X_te, y_te = _safe_split(estimator, X, y, test_idx, train_idx)
        estimator.fit(X_tr, y_tr)
        fold_scores.append(scorer(estimator, X_te, y_te))
    return np.mean(fold_scores)
def _shuffle(y, groups, random_state):
    """Return a shuffled copy of ``y``.

    When ``groups`` is given, labels are permuted only within each
    group, i.e. no sample is moved across a group boundary.
    """
    if groups is None:
        order = random_state.permutation(len(y))
    else:
        order = np.arange(len(groups))
        for group_label in np.unique(groups):
            member_mask = groups == group_label
            order[member_mask] = random_state.permutation(order[member_mask])
    return safe_indexing(y, order)
def _index_param_value(X, v, indices):
    """Private helper function for parameter value indexing.

    Returns ``v`` unchanged when it is not array-like or when its number
    of samples differs from ``X`` (e.g. scalar fit-parameters);
    otherwise returns the subset of ``v`` selected by ``indices``.
    """
    # Local import: the module-level ``import scipy.sparse as sp`` was
    # commented out, which made ``sp.issparse`` a NameError at runtime.
    import scipy.sparse as sp

    # Inlined check equivalent to sklearn.utils._is_arraylike, which was
    # referenced here without ever being imported (NameError).
    is_arraylike = (
        hasattr(v, "__len__") or hasattr(v, "shape") or hasattr(v, "__array__")
    )
    if not is_arraylike or _num_samples(v) != _num_samples(X):
        # pass through: skip indexing
        return v
    if sp.issparse(v):
        # CSR supports efficient row indexing
        v = v.tocsr()
    return safe_indexing(v, indices)
| bsd-3-clause | 0504c439ff7036b0f067edaae4d95805 | 30.967742 | 88 | 0.625855 | 3.682494 | false | true | false | false |
poldracklab/mriqc | mriqc/qc/functional.py | 2 | 9444 | # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
#
# Copyright 2021 The NiPreps Developers <nipreps@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# We support and encourage derived works from this project, please read
# about our expectations at
#
# https://www.nipreps.org/community/licensing/
#
r"""
Measures for the spatial information
====================================
Definitions are given in the
:ref:`summary of structural IQMs <iqms_t1w>`.
.. _iqms_efc:
- **Entropy-focus criterion** (:py:func:`~mriqc.qc.anatomical.efc`).
.. _iqms_fber:
- **Foreground-Background energy ratio** (:py:func:`~mriqc.qc.anatomical.fber`, [Shehzad2015]_).
.. _iqms_fwhm:
- **Full-width half maximum smoothness** (``fwhm_*``, see [Friedman2008]_).
.. _iqms_snr:
- **Signal-to-noise ratio** (:py:func:`~mriqc.qc.anatomical.snr`).
.. _iqms_summary:
- **Summary statistics** (:py:func:`~mriqc.qc.anatomical.summary_stats`).
Measures for the temporal information
-------------------------------------
.. _iqms_dvars :
DVARS
D referring to temporal derivative of timecourses, VARS referring to
RMS variance over voxels ([Power2012]_ ``dvars_nstd``) indexes the rate of change of
BOLD signal across the entire brain at each frame of data. DVARS is calculated
`with nipype
<http://nipype.readthedocs.io/en/latest/interfaces/generated/nipype.algorithms.confounds.html#computedvars>`_
after motion correction:
.. math ::
\text{DVARS}_t = \sqrt{\frac{1}{N}\sum_i \left[x_{i,t} - x_{i,t-1}\right]^2}
.. note ::
Intensities are scaled to 1000 leading to the units being expressed in x10
:math:`\%\Delta\text{BOLD}` change.
.. note ::
MRIQC calculates two additional standardized values of the DVARS.
The ``dvars_std`` metric is normalized with the standard deviation of the
temporal difference time series. The ``dvars_vstd`` is a voxel-wise
standardization of DVARS, where the temporal difference time series is
normalized across time by that voxel standard deviation across time, before
computing the RMS of the temporal difference [Nichols2013]_.
.. _iqms_gcor:
Global Correlation (``gcor``)
calculates an optimized summary of time-series
correlation as in [Saad2013]_ using AFNI's ``@compute_gcor``:
.. math ::
\text{GCOR} = \frac{1}{N}\mathbf{g}_u^T\mathbf{g}_u
where :math:`\mathbf{g}_u` is the average of all unit-variance time series in a
:math:`T` (# timepoints) :math:`\times` :math:`N` (# voxels) matrix.
.. _iqms_tsnr:
Temporal SNR (:abbr:`tSNR (temporal SNR)`, ``tsnr``)
is a simplified interpretation of the tSNR definition [Kruger2001]_.
We report the median value
of the `tSNR map
<http://nipype.readthedocs.io/en/latest/interfaces/generated/nipype.algorithms.confounds.html#tsnr>`_
calculated like:
.. math ::
\text{tSNR} = \frac{\langle S \rangle_t}{\sigma_t},
where :math:`\langle S \rangle_t` is the average BOLD signal (across time),
and :math:`\sigma_t` is the corresponding temporal standard-deviation map. Higher
values are better.
Measures for artifacts and other
--------------------------------
.. _iqms_fd:
Framewise Displacement
expresses instantaneous head-motion [Jenkinson2002]_.
MRIQC reports the average FD, labeled as ``fd_mean``.
Rotational displacements are calculated as the displacement on the surface of a
sphere of radius 50 mm [Power2012]_:
.. math ::
\text{FD}_t = |\Delta d_{x,t}| + |\Delta d_{y,t}| +
|\Delta d_{z,t}| + |\Delta \alpha_t| + |\Delta \beta_t| + |\Delta \gamma_t|
Along with the base framewise displacement, MRIQC reports the
**number of timepoints above FD threshold** (``fd_num``), and the
**percent of FDs above the FD threshold** w.r.t. the full timeseries (``fd_perc``).
In both cases, the threshold is set at 0.20mm.
.. _iqms_gsr:
Ghost to Signal Ratio (:py:func:`~mriqc.qc.functional.gsr`)
labeled in the reports as ``gsr_x`` and ``gsr_y``
(calculated along the two possible phase-encoding axes **x**, **y**):
.. math ::
\text{GSR} = \frac{\mu_G - \mu_{NG}}{\mu_S}
.. image :: ../_static/epi-gsrmask.png
:width: 200px
:align: center
.. _iqms_aor:
AFNI's outlier ratio (``aor``)
Mean fraction of outliers per fMRI volume
as given by AFNI's ``3dToutcount``.
.. _iqms_aqi:
AFNI's quality index (``aqi``)
Mean quality index as computed by AFNI's ``3dTqual``; for each volume,
it is one minus the Spearman's (rank) correlation of that volume with the
median volume. Lower values are better.
.. _iqms_dummy:
Number of *dummy* scans (``dummy``)
    The number of volumes at the beginning of the
    fMRI timeseries identified as non-steady state.
.. topic:: References
.. [Atkinson1997] Atkinson et al., *Automatic correction of motion artifacts
in magnetic resonance images using an entropy
focus criterion*, IEEE Trans Med Imag 16(6):903-910, 1997.
doi:`10.1109/42.650886 <http://dx.doi.org/10.1109/42.650886>`_.
.. [Friedman2008] Friedman, L et al., *Test--retest and between‐site reliability in a multicenter
fMRI study*. Hum Brain Mapp, 29(8):958--972, 2008. doi:`10.1002/hbm.20440
<http://dx.doi.org/10.1002/hbm.20440>`_.
.. [Giannelli2010] Giannelli et al., *Characterization of Nyquist ghost in
EPI-fMRI acquisition sequences implemented on two clinical 1.5 T MR scanner
systems: effect of readout bandwidth and echo spacing*. J App Clin Med Phy,
11(4). 2010.
doi:`10.1120/jacmp.v11i4.3237 <http://dx.doi.org/10.1120/jacmp.v11i4.3237>`_.
.. [Jenkinson2002] Jenkinson et al., *Improved Optimisation for the Robust and
Accurate Linear Registration and Motion Correction of Brain Images*.
NeuroImage, 17(2), 825-841, 2002.
doi:`10.1006/nimg.2002.1132 <http://dx.doi.org/10.1006/nimg.2002.1132>`_.
.. [Kruger2001] Krüger et al., *Physiological noise in oxygenation-sensitive
magnetic resonance imaging*, Magn. Reson. Med. 46(4):631-637, 2001.
doi:`10.1002/mrm.1240 <http://dx.doi.org/10.1002/mrm.1240>`_.
.. [Nichols2013] Nichols, `Notes on Creating a Standardized Version of DVARS
<http://www2.warwick.ac.uk/fac/sci/statistics/staff/academic-research/nichols/scripts/fsl/standardizeddvars.pdf>`_,
2013.
.. [Power2012] Power et al., *Spurious but systematic correlations in
functional connectivity MRI networks arise from subject motion*,
NeuroImage 59(3):2142-2154,
2012, doi:`10.1016/j.neuroimage.2011.10.018
<http://dx.doi.org/10.1016/j.neuroimage.2011.10.018>`_.
.. [Saad2013] Saad et al. *Correcting Brain-Wide Correlation Differences
in Resting-State FMRI*, Brain Conn 3(4):339-352,
2013, doi:`10.1089/brain.2013.0156
<http://dx.doi.org/10.1089/brain.2013.0156>`_.
"""
import os.path as op
import numpy as np
RAS_AXIS_ORDER = {"x": 0, "y": 1, "z": 2}


def gsr(epi_data, mask, direction="y", ref_file=None, out_file=None):
    """
    Compute the :abbr:`GSR (ghost to signal ratio)` [Giannelli2010]_.

    The procedure is as follows:

      #. Create a Nyquist ghost mask by circle-shifting the original mask
         by :math:`N/2`.

      #. Rotate by :math:`N/2`

      #. Remove the intersection with the original mask

      #. Generate a non-ghost background

      #. Calculate the :abbr:`GSR (ghost to signal ratio)`

    .. warning ::

      This should be used with EPI images for which the phase
      encoding direction is known.

    :param numpy.ndarray epi_data: the EPI image data
    :param numpy.ndarray mask: the brain mask (1 inside, 0 outside)
    :param str direction: the direction of phase encoding
        (``x``, ``-x``, ``y``, ``-y``, or ``all``)
    :param ref_file: unused; kept for backwards compatibility
    :param str out_file: basename used to derive per-direction output
        names when ``direction == "all"``

    :return: the computed gsr (a list of two values when
        ``direction == "all"``, ordered x then y)
    """
    direction = direction.lower()
    # BUG FIX: the original test `direction[-1] not in ["x", "y", "all"]`
    # rejected "all" (its last character is 'l'), and the advertised
    # "-x"/"-y" spellings crashed later at RAS_AXIS_ORDER[direction].
    if direction not in ("x", "-x", "y", "-y", "all"):
        raise Exception(
            "Unknown direction {}, should be one of x, -x, y, -y, all".format(direction)
        )

    if direction == "all":
        result = []
        for newdir in ["x", "y"]:
            ofile = None
            if out_file is not None:
                # BUG FIX: derive the per-direction name from out_file
                # (the original called op.splitext(ofile) on None).
                fname, ext = op.splitext(out_file)
                if ext == ".gz":
                    fname, ext2 = op.splitext(fname)
                    ext = ext2 + ext
                ofile = "{0}_{1}{2}".format(fname, newdir, ext)
            result += [gsr(epi_data, mask, newdir, ref_file=ref_file, out_file=ofile)]
        return result

    # Roll data of mask through the appropriate axis; direction[-1] maps
    # both "x" and "-x" (same ghost geometry) onto the same axis.
    axis = RAS_AXIS_ORDER[direction[-1]]
    n2_mask = np.roll(mask, mask.shape[axis] // 2, axis=axis)

    # Step 3: remove from n2_mask pixels inside the brain
    n2_mask = n2_mask * (1 - mask)

    # Step 4: non-ghost background region is labeled as 2
    n2_mask = n2_mask + 2 * (1 - n2_mask - mask)

    # Step 5: signal is the entire foreground image
    ghost = np.mean(epi_data[n2_mask == 1]) - np.mean(epi_data[n2_mask == 2])
    signal = np.median(epi_data[n2_mask == 0])
    return float(ghost / signal)
| bsd-3-clause | 1e51489be857ab4800ddc6ca4baa0378 | 33.206522 | 119 | 0.662536 | 3.113786 | false | false | false | false |
poldracklab/mriqc | mriqc/workflows/anatomical.py | 1 | 28238 | # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
#
# Copyright 2021 The NiPreps Developers <nipreps@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# We support and encourage derived works from this project, please read
# about our expectations at
#
# https://www.nipreps.org/community/licensing/
#
"""
Anatomical workflow
===================
.. image :: _static/anatomical_workflow_source.svg
The anatomical workflow follows the following steps:
#. Conform (reorientations, revise data types) input data and read
associated metadata.
#. Skull-stripping (AFNI).
#. Calculate head mask -- :py:func:`headmsk_wf`.
#. Spatial Normalization to MNI (ANTs)
#. Calculate air mask above the nasial-cerebelum plane -- :py:func:`airmsk_wf`.
#. Brain tissue segmentation (FAST).
#. Extraction of IQMs -- :py:func:`compute_iqms`.
#. Individual-reports generation -- :py:func:`individual_reports`.
This workflow is orchestrated by :py:func:`anat_qc_workflow`.
For the skull-stripping, we use ``afni_wf`` from ``niworkflows.anat.skullstrip``:
.. workflow::
from niworkflows.anat.skullstrip import afni_wf
from mriqc.testing import mock_config
with mock_config():
wf = afni_wf()
"""
from mriqc import config
from mriqc.interfaces import (
ArtifactMask,
ComputeQI2,
ConformImage,
IQMFileSink,
RotationMask,
StructuralQC,
)
from mriqc.interfaces.reports import AddProvenance
from mriqc.messages import BUILDING_WORKFLOW
from mriqc.workflows.utils import get_fwhmx
from nipype.interfaces import ants, fsl
from nipype.interfaces import io as nio
from nipype.interfaces import utility as niu
from nipype.pipeline import engine as pe
from templateflow.api import get as get_template
def anat_qc_workflow(name="anatMRIQC"):
    """
    One-subject-one-session-one-run pipeline to extract the NR-IQMs from
    anatomical images.

    The workflow iterates over every T1w/T2w file found in the
    configuration inputs and, for each, runs: conformation, AFNI
    skull-stripping, head/air/artifact mask extraction, ANTs spatial
    normalization, FSL FAST segmentation, IQM extraction, individual
    report generation and (unless disabled) IQM upload to the web API.

    :param str name: name assigned to the nipype workflow.
    :return: a :class:`nipype.pipeline.engine.Workflow`.

    .. workflow::

        import os.path as op
        from mriqc.workflows.anatomical import anat_qc_workflow
        from mriqc.testing import mock_config
        with mock_config():
            wf = anat_qc_workflow()

    """
    from niworkflows.anat.skullstrip import afni_wf as skullstrip_wf

    dataset = config.workflow.inputs.get("T1w", []) + config.workflow.inputs.get(
        "T2w", []
    )
    message = BUILDING_WORKFLOW.format(dataset=", ".join(dataset))
    config.loggers.workflow.info(message)

    # Initialize workflow
    workflow = pe.Workflow(name=name)

    # Define workflow, inputs and outputs
    # 0. Get data — one iteration of the whole workflow per input file
    inputnode = pe.Node(niu.IdentityInterface(fields=["in_file"]), name="inputnode")
    inputnode.iterables = [("in_file", dataset)]

    outputnode = pe.Node(niu.IdentityInterface(fields=["out_json"]), name="outputnode")

    # 1. Reorient anatomical image
    to_ras = pe.Node(ConformImage(check_dtype=False), name="conform")
    # 2. Skull-stripping (afni)
    asw = skullstrip_wf(n4_nthreads=config.nipype.omp_nthreads, unifize=False)
    # 3. Head mask
    hmsk = headmsk_wf()
    # 4. Spatial Normalization, using ANTs
    norm = spatial_normalization()
    # 5. Air mask (with and without artifacts)
    amw = airmsk_wf()
    # 6. Brain tissue segmentation
    segment = pe.Node(
        fsl.FAST(segments=True, out_basename="segment"),
        name="segmentation",
        mem_gb=5,
    )
    # 7. Compute IQMs
    iqmswf = compute_iqms()
    # Reports
    repwf = individual_reports()

    # Connect all nodes
    # fmt: off
    workflow.connect([
        (inputnode, to_ras, [("in_file", "in_file")]),
        (inputnode, iqmswf, [("in_file", "inputnode.in_file")]),
        (inputnode, norm, [(("in_file", _get_mod), "inputnode.modality")]),
        (inputnode, segment, [(("in_file", _get_imgtype), "img_type")]),
        (to_ras, asw, [("out_file", "inputnode.in_file")]),
        (asw, segment, [("outputnode.out_file", "in_files")]),
        (asw, hmsk, [("outputnode.bias_corrected", "inputnode.in_file")]),
        (segment, hmsk, [("tissue_class_map", "inputnode.in_segm")]),
        (asw, norm, [("outputnode.bias_corrected", "inputnode.moving_image"),
                     ("outputnode.out_mask", "inputnode.moving_mask")]),
        (norm, amw, [
            ("outputnode.inverse_composite_transform", "inputnode.inverse_composite_transform")]),
        (norm, iqmswf, [
            ("outputnode.inverse_composite_transform", "inputnode.inverse_composite_transform")]),
        (norm, repwf, ([
            ("outputnode.out_report", "inputnode.mni_report")])),
        (to_ras, amw, [("out_file", "inputnode.in_file")]),
        (asw, amw, [("outputnode.out_mask", "inputnode.in_mask")]),
        (hmsk, amw, [("outputnode.out_file", "inputnode.head_mask")]),
        (to_ras, iqmswf, [("out_file", "inputnode.in_ras")]),
        (asw, iqmswf, [("outputnode.bias_corrected", "inputnode.inu_corrected"),
                       ("outputnode.bias_image", "inputnode.in_inu"),
                       ("outputnode.out_mask", "inputnode.brainmask")]),
        (amw, iqmswf, [("outputnode.air_mask", "inputnode.airmask"),
                       ("outputnode.hat_mask", "inputnode.hatmask"),
                       ("outputnode.art_mask", "inputnode.artmask"),
                       ("outputnode.rot_mask", "inputnode.rotmask")]),
        (segment, iqmswf, [("tissue_class_map", "inputnode.segmentation"),
                           ("partial_volume_files", "inputnode.pvms")]),
        (hmsk, iqmswf, [("outputnode.out_file", "inputnode.headmask")]),
        (to_ras, repwf, [("out_file", "inputnode.in_ras")]),
        (asw, repwf, [("outputnode.bias_corrected", "inputnode.inu_corrected"),
                      ("outputnode.out_mask", "inputnode.brainmask")]),
        (hmsk, repwf, [("outputnode.out_file", "inputnode.headmask")]),
        (amw, repwf, [("outputnode.air_mask", "inputnode.airmask"),
                      ("outputnode.art_mask", "inputnode.artmask"),
                      ("outputnode.rot_mask", "inputnode.rotmask")]),
        (segment, repwf, [("tissue_class_map", "inputnode.segmentation")]),
        (iqmswf, repwf, [("outputnode.noisefit", "inputnode.noisefit")]),
        (iqmswf, repwf, [("outputnode.out_file", "inputnode.in_iqms")]),
        (iqmswf, outputnode, [("outputnode.out_file", "out_json")]),
    ])
    # fmt: on

    # Upload metrics (skipped when the user opted out with --no-sub)
    if not config.execution.no_sub:
        from ..interfaces.webapi import UploadIQMs

        upldwf = pe.Node(UploadIQMs(), name="UploadMetrics")
        upldwf.inputs.url = config.execution.webapi_url
        upldwf.inputs.strict = config.execution.upload_strict
        if config.execution.webapi_port:
            upldwf.inputs.port = config.execution.webapi_port
        # fmt: off
        workflow.connect([
            (iqmswf, upldwf, [("outputnode.out_file", "in_iqms")]),
            (upldwf, repwf, [("api_id", "inputnode.api_id")]),
        ])
        # fmt: on

    return workflow
def spatial_normalization(name="SpatialNormalization", resolution=2):
    """
    Create a simplified workflow to perform fast spatial normalization.

    Registers the moving image to the configured template with the
    ANTs-based ``SpatialNormalizationRPT`` interface and exposes the
    inverse transform (template -> subject space) plus an SVG report.

    Parameters
    ----------
    name : str
        Name given to the nipype workflow.
    resolution : int
        Resolution index of the template used as registration target.

    Returns
    -------
    pe.Workflow
        Workflow with ``inputnode`` (moving_image, moving_mask, modality)
        and ``outputnode`` (inverse_composite_transform, out_report).

    """
    from niworkflows.interfaces.reportlets.registration import (
        SpatialNormalizationRPT as RobustMNINormalization,
    )
    # Have the template id handy
    tpl_id = config.workflow.template_id
    # Define workflow interface
    workflow = pe.Workflow(name=name)
    inputnode = pe.Node(
        niu.IdentityInterface(fields=["moving_image", "moving_mask", "modality"]),
        name="inputnode",
    )
    outputnode = pe.Node(
        niu.IdentityInterface(fields=["inverse_composite_transform", "out_report"]),
        name="outputnode",
    )
    # Spatial normalization; the cheaper "testing" flavor is selected in debug mode
    norm = pe.Node(
        RobustMNINormalization(
            flavor=["testing", "fast"][config.execution.debug],
            num_threads=config.nipype.omp_nthreads,
            float=config.execution.ants_float,
            template=tpl_id,
            template_resolution=resolution,
            generate_report=True,
        ),
        name="SpatialNormalization",
        # Request all MultiProc processes when ants_nthreads > n_procs
        num_threads=config.nipype.omp_nthreads,
        mem_gb=3,
    )
    # Restrict the registration cost function to the template's brain mask
    norm.inputs.reference_mask = str(
        get_template(tpl_id, resolution=resolution, desc="brain", suffix="mask")
    )
    # fmt: off
    workflow.connect([
        (inputnode, norm, [("moving_image", "moving_image"),
                           ("moving_mask", "moving_mask"),
                           ("modality", "reference")]),
        (norm, outputnode, [("inverse_composite_transform", "inverse_composite_transform"),
                            ("out_report", "out_report")]),
    ])
    # fmt: on
    return workflow
def compute_iqms(name="ComputeIQMs"):
    """
    Setup the workflow that actually computes the IQMs.

    Wires together metadata extraction (BIDS sidecar), AFNI smoothness
    estimation, intensity harmonization, Mortamet's QI2, the Python-coded
    structural measures, and back-projection of the template tissue
    probability maps into subject space, sinking everything into the
    per-subject IQMs JSON file.

    .. workflow::

        from mriqc.workflows.anatomical import compute_iqms
        from mriqc.testing import mock_config
        with mock_config():
            wf = compute_iqms()

    """
    from niworkflows.interfaces.bids import ReadSidecarJSON
    from ..interfaces.anatomical import Harmonize
    from .utils import _tofloat
    workflow = pe.Workflow(name=name)
    inputnode = pe.Node(
        niu.IdentityInterface(
            fields=[
                "in_file",
                "in_ras",
                "brainmask",
                "airmask",
                "artmask",
                "headmask",
                "rotmask",
                "hatmask",
                "segmentation",
                "inu_corrected",
                "in_inu",
                "pvms",
                "metadata",
                "inverse_composite_transform",
            ]
        ),
        name="inputnode",
    )
    outputnode = pe.Node(
        niu.IdentityInterface(fields=["out_file", "noisefit"]),
        name="outputnode",
    )
    # Extract metadata
    meta = pe.Node(ReadSidecarJSON(), name="metadata")
    # Add provenance
    addprov = pe.Node(AddProvenance(), name="provenance", run_without_submitting=True)
    # AFNI check smoothing
    fwhm_interface = get_fwhmx()
    fwhm = pe.Node(fwhm_interface, name="smoothness")
    # Harmonize
    homog = pe.Node(Harmonize(), name="harmonize")
    # Mortamet's QI2
    getqi2 = pe.Node(ComputeQI2(), name="ComputeQI2")
    # Compute python-coded measures
    measures = pe.Node(StructuralQC(), "measures")
    # Project MNI segmentation to T1 space
    invt = pe.MapNode(
        ants.ApplyTransforms(
            dimension=3, default_value=0, interpolation="Linear", float=True
        ),
        iterfield=["input_image"],
        name="MNItpms2t1",
    )
    # Template tissue probability maps, in CSF/GM/WM order
    invt.inputs.input_image = [
        str(p)
        for p in get_template(
            config.workflow.template_id,
            suffix="probseg",
            resolution=1,
            label=["CSF", "GM", "WM"],
        )
    ]
    datasink = pe.Node(
        IQMFileSink(
            out_dir=config.execution.output_dir,
            dataset=config.execution.dsname,
        ),
        name="datasink",
        run_without_submitting=True,
    )
    def _getwm(inlist):
        # WM is the last partial-volume map given the CSF/GM/WM ordering above
        return inlist[-1]
    # fmt: off
    workflow.connect([
        (inputnode, meta, [("in_file", "in_file")]),
        (inputnode, datasink, [("in_file", "in_file"),
                               (("in_file", _get_mod), "modality")]),
        (inputnode, addprov, [(("in_file", _get_mod), "modality")]),
        (meta, datasink, [("subject", "subject_id"),
                          ("session", "session_id"),
                          ("task", "task_id"),
                          ("acquisition", "acq_id"),
                          ("reconstruction", "rec_id"),
                          ("run", "run_id"),
                          ("out_dict", "metadata")]),
        (inputnode, addprov, [("in_file", "in_file"),
                              ("airmask", "air_msk"),
                              ("rotmask", "rot_msk")]),
        (inputnode, getqi2, [("in_ras", "in_file"),
                             ("hatmask", "air_msk")]),
        (inputnode, homog, [("inu_corrected", "in_file"),
                            (("pvms", _getwm), "wm_mask")]),
        (inputnode, measures, [("in_inu", "in_bias"),
                               ("in_ras", "in_file"),
                               ("airmask", "air_msk"),
                               ("headmask", "head_msk"),
                               ("artmask", "artifact_msk"),
                               ("rotmask", "rot_msk"),
                               ("segmentation", "in_segm"),
                               ("pvms", "in_pvms")]),
        (inputnode, fwhm, [("in_ras", "in_file"),
                           ("brainmask", "mask")]),
        (inputnode, invt, [("in_ras", "reference_image"),
                           ("inverse_composite_transform", "transforms")]),
        (homog, measures, [("out_file", "in_noinu")]),
        (invt, measures, [("output_image", "mni_tpms")]),
        (fwhm, measures, [(("fwhm", _tofloat), "in_fwhm")]),
        (measures, datasink, [("out_qc", "root")]),
        (addprov, datasink, [("out_prov", "provenance")]),
        (getqi2, datasink, [("qi2", "qi_2")]),
        (getqi2, outputnode, [("out_file", "noisefit")]),
        (datasink, outputnode, [("out_file", "out_file")]),
    ])
    # fmt: on
    return workflow
def individual_reports(name="ReportsWorkflow"):
    """
    Generate the components of the individual report.

    Always plots the zoomed and background-noise mosaics; when
    ``config.execution.verbose_reports`` is enabled, seven extra pages
    (contours of segmentation and masks, MNI report, noise fit) are added.

    .. workflow::

        from mriqc.workflows.anatomical import individual_reports
        from mriqc.testing import mock_config
        with mock_config():
            wf = individual_reports()

    """
    from ..interfaces import PlotMosaic
    from ..interfaces.reports import IndividualReport
    verbose = config.execution.verbose_reports
    # Two mandatory mosaic pages, plus seven optional verbose pages
    pages = 2
    extra_pages = int(verbose) * 7
    workflow = pe.Workflow(name=name)
    inputnode = pe.Node(
        niu.IdentityInterface(
            fields=[
                "in_ras",
                "brainmask",
                "headmask",
                "airmask",
                "artmask",
                "rotmask",
                "segmentation",
                "inu_corrected",
                "noisefit",
                "in_iqms",
                "mni_report",
                "api_id",
            ]
        ),
        name="inputnode",
    )
    mosaic_zoom = pe.Node(
        PlotMosaic(out_file="plot_anat_mosaic1_zoomed.svg", cmap="Greys_r"),
        name="PlotMosaicZoomed",
    )
    mosaic_noise = pe.Node(
        PlotMosaic(
            out_file="plot_anat_mosaic2_noise.svg",
            only_noise=True,
            cmap="viridis_r",
        ),
        name="PlotMosaicNoise",
    )
    mplots = pe.Node(niu.Merge(pages + extra_pages), name="MergePlots")
    rnode = pe.Node(IndividualReport(), name="GenerateReport")
    # Link images that should be reported
    dsplots = pe.Node(
        nio.DataSink(
            base_directory=str(config.execution.output_dir),
            parameterization=False,
        ),
        name="dsplots",
        run_without_submitting=True,
    )
    # fmt: off
    workflow.connect([
        (inputnode, rnode, [("in_iqms", "in_iqms")]),
        (inputnode, mosaic_zoom, [("in_ras", "in_file"),
                                  ("brainmask", "bbox_mask_file")]),
        (inputnode, mosaic_noise, [("in_ras", "in_file")]),
        (mosaic_zoom, mplots, [("out_file", "in1")]),
        (mosaic_noise, mplots, [("out_file", "in2")]),
        (mplots, rnode, [("out", "in_plots")]),
        (rnode, dsplots, [("out_file", "@html_report")]),
    ])
    # fmt: on
    # The non-verbose report ends here
    if not verbose:
        return workflow
    from ..interfaces.viz import PlotContours
    # Contours of the three tissue classes over axial slices
    plot_segm = pe.Node(
        PlotContours(
            display_mode="z",
            levels=[0.5, 1.5, 2.5],
            cut_coords=10,
            colors=["r", "g", "b"],
        ),
        name="PlotSegmentation",
    )
    plot_bmask = pe.Node(
        PlotContours(
            display_mode="z",
            levels=[0.5],
            colors=["r"],
            cut_coords=10,
            out_file="bmask",
        ),
        name="PlotBrainmask",
    )
    plot_airmask = pe.Node(
        PlotContours(
            display_mode="x",
            levels=[0.5],
            colors=["r"],
            cut_coords=6,
            out_file="airmask",
        ),
        name="PlotAirmask",
    )
    plot_headmask = pe.Node(
        PlotContours(
            display_mode="x",
            levels=[0.5],
            colors=["r"],
            cut_coords=6,
            out_file="headmask",
        ),
        name="PlotHeadmask",
    )
    plot_artmask = pe.Node(
        PlotContours(
            display_mode="z",
            levels=[0.5],
            colors=["r"],
            cut_coords=10,
            out_file="artmask",
            saturate=True,
        ),
        name="PlotArtmask",
    )
    # fmt: off
    workflow.connect([
        (inputnode, plot_segm, [("in_ras", "in_file"),
                                ("segmentation", "in_contours")]),
        (inputnode, plot_bmask, [("in_ras", "in_file"),
                                 ("brainmask", "in_contours")]),
        (inputnode, plot_headmask, [("in_ras", "in_file"),
                                    ("headmask", "in_contours")]),
        (inputnode, plot_airmask, [("in_ras", "in_file"),
                                   ("airmask", "in_contours")]),
        (inputnode, plot_artmask, [("in_ras", "in_file"),
                                   ("artmask", "in_contours")]),
        (inputnode, mplots, [("mni_report", f"in{pages + 1}")]),
        (plot_bmask, mplots, [("out_file", f"in{pages + 2}")]),
        (plot_segm, mplots, [("out_file", f"in{pages + 3}")]),
        (plot_artmask, mplots, [("out_file", f"in{pages + 4}")]),
        (plot_headmask, mplots, [("out_file", f"in{pages + 5}")]),
        (plot_airmask, mplots, [("out_file", f"in{pages + 6}")]),
        (inputnode, mplots, [("noisefit", f"in{pages + 7}")]),
    ])
    # fmt: on
    return workflow
def headmsk_wf(name="HeadMaskWorkflow"):
    """
    Computes a head mask as in [Mortamet2009]_.

    Two implementations are available: FSL BET (when
    ``config.workflow.headmask`` equals ``"BET"``) or a denoising +
    gradient-threshold pipeline that requires DIPY.

    .. workflow::

        from mriqc.testing import mock_config
        from mriqc.workflows.anatomical import headmsk_wf
        with mock_config():
            wf = headmsk_wf()

    """
    use_bet = config.workflow.headmask.upper() == "BET"
    has_dipy = False
    if not use_bet:
        # Probe for DIPY only when the gradient-based branch would be used
        try:
            from dipy.denoise import nlmeans  # noqa
            has_dipy = True
        except ImportError:
            pass
    if not use_bet and not has_dipy:
        raise RuntimeError(
            "DIPY is not installed and ``config.workflow.headmask`` is not BET."
        )
    workflow = pe.Workflow(name=name)
    inputnode = pe.Node(
        niu.IdentityInterface(fields=["in_file", "in_segm"]), name="inputnode"
    )
    outputnode = pe.Node(niu.IdentityInterface(fields=["out_file"]), name="outputnode")
    if use_bet:
        # Alternative for when dipy is not installed
        bet = pe.Node(fsl.BET(surfaces=True), name="fsl_bet")
        # fmt: off
        workflow.connect([
            (inputnode, bet, [("in_file", "in_file")]),
            (bet, outputnode, [('outskin_mask_file', "out_file")]),
        ])
        # fmt: on
    else:
        from nipype.interfaces.dipy import Denoise
        # Contrast enhancement prior to denoising
        enhance = pe.Node(
            niu.Function(
                input_names=["in_file"],
                output_names=["out_file"],
                function=_enhance,
            ),
            name="Enhance",
        )
        # SNR estimate (from the WM segment) drives the denoising strength
        estsnr = pe.Node(
            niu.Function(
                input_names=["in_file", "seg_file"],
                output_names=["out_snr"],
                function=_estimate_snr,
            ),
            name="EstimateSNR",
        )
        denoise = pe.Node(Denoise(), name="Denoise")
        # Gradient magnitude followed by thresholding yields the head contour
        gradient = pe.Node(
            niu.Function(
                input_names=["in_file", "snr"],
                output_names=["out_file"],
                function=image_gradient,
            ),
            name="Grad",
        )
        thresh = pe.Node(
            niu.Function(
                input_names=["in_file", "in_segm"],
                output_names=["out_file"],
                function=gradient_threshold,
            ),
            name="GradientThreshold",
        )
        # fmt: off
        workflow.connect([
            (inputnode, estsnr, [("in_file", "in_file"),
                                 ("in_segm", "seg_file")]),
            (estsnr, denoise, [("out_snr", "snr")]),
            (inputnode, enhance, [("in_file", "in_file")]),
            (enhance, denoise, [("out_file", "in_file")]),
            (estsnr, gradient, [("out_snr", "snr")]),
            (denoise, gradient, [("out_file", "in_file")]),
            (inputnode, thresh, [("in_segm", "in_segm")]),
            (gradient, thresh, [("out_file", "in_file")]),
            (thresh, outputnode, [("out_file", "out_file")]),
        ])
        # fmt: on
    return workflow
def airmsk_wf(name="AirMaskWorkflow"):
    """
    Implements the Step 1 of [Mortamet2009]_.

    Produces the "hat", air, artifact and rotation masks used downstream by
    the IQM computations. The template head mask is mapped back into subject
    space to locate the region below the nasio-cerebellum line.

    .. workflow::

        from mriqc.testing import mock_config
        from mriqc.workflows.anatomical import airmsk_wf
        with mock_config():
            wf = airmsk_wf()

    """
    workflow = pe.Workflow(name=name)
    inputnode = pe.Node(
        niu.IdentityInterface(
            fields=[
                "in_file",
                "in_mask",
                "head_mask",
                "inverse_composite_transform",
            ]
        ),
        name="inputnode",
    )
    outputnode = pe.Node(
        niu.IdentityInterface(fields=["hat_mask", "air_mask", "art_mask", "rot_mask"]),
        name="outputnode",
    )
    rotmsk = pe.Node(RotationMask(), name="RotationMask")
    # Bring the template head mask into subject space (nearest/multi-label)
    invt = pe.Node(
        ants.ApplyTransforms(
            dimension=3,
            default_value=0,
            interpolation="MultiLabel",
            float=True,
        ),
        name="invert_xfm",
    )
    invt.inputs.input_image = str(
        get_template("MNI152NLin2009cAsym", resolution=1, desc="head", suffix="mask")
    )
    qi1 = pe.Node(ArtifactMask(), name="ArtifactMask")
    # fmt: off
    workflow.connect([
        (inputnode, rotmsk, [("in_file", "in_file")]),
        (inputnode, qi1, [("in_file", "in_file"),
                          ("head_mask", "head_mask")]),
        (rotmsk, qi1, [("out_file", "rot_mask")]),
        (inputnode, invt, [("in_mask", "reference_image"),
                           ("inverse_composite_transform", "transforms")]),
        (invt, qi1, [("output_image", "nasion_post_mask")]),
        (qi1, outputnode, [("out_hat_msk", "hat_mask"),
                           ("out_air_msk", "air_mask"),
                           ("out_art_msk", "art_mask")]),
        (rotmsk, outputnode, [("out_file", "rot_mask")])
    ])
    # fmt: on
    return workflow
def _binarize(in_file, threshold=0.5, out_file=None):
    """Binarize ``in_file`` at ``threshold`` and save it as a uint8 NIfTI mask.

    Imports live inside the function so it can be shipped to a nipype
    ``Function`` node. Returns the absolute path of the written mask.
    """
    import os.path as op
    import nibabel as nb
    import numpy as np

    if out_file is None:
        basename, extension = op.splitext(op.basename(in_file))
        if extension == ".gz":
            basename, inner_ext = op.splitext(basename)
            extension = inner_ext + extension
        out_file = op.abspath("{}_bin{}".format(basename, extension))

    img = nb.load(in_file)
    mask = img.get_data()
    # Everything at or below the threshold is background; the rest is mask
    mask[mask <= threshold] = 0
    mask[mask > 0] = 1

    header = img.header.copy()
    header.set_data_dtype(np.uint8)
    nb.Nifti1Image(mask.astype(np.uint8), img.affine, header).to_filename(out_file)
    return out_file
def _estimate_snr(in_file, seg_file):
    """Estimate the SNR of ``in_file`` within the white-matter segment of ``seg_file``."""
    import nibabel as nb
    import numpy as np
    from mriqc.qc.anatomical import snr

    # Label 2 of the segmentation is treated as white matter here
    wm_mask = nb.load(seg_file).get_data() == 2
    wm_values = nb.load(in_file).get_data()[wm_mask]
    return snr(np.mean(wm_values), wm_values.std(), wm_mask.sum())
def _enhance(in_file, out_file=None):
    """Suppress hyper-intense outlier voxels by resampling them from the image.

    Voxels above the 99.98th percentile are replaced by random draws from
    the above-median intensity pool. Returns the path of the written image.
    """
    import os.path as op
    import nibabel as nb
    import numpy as np

    if out_file is None:
        basename, extension = op.splitext(op.basename(in_file))
        if extension == ".gz":
            basename, inner_ext = op.splitext(basename)
            extension = inner_ext + extension
        out_file = op.abspath(f"{basename}_enhanced{extension}")

    img = nb.load(in_file)
    data = img.get_data().astype(np.float32)  # pylint: disable=no-member
    ceiling = np.percentile(data[data > 0], 99.98)
    floor = np.median(data[data > 0])

    # Zero the outliers first so they are excluded from the sampling pool,
    # then overwrite them with draws from the above-median intensities.
    outliers = np.where(data > ceiling)
    data[outliers] = 0
    data[outliers] = np.random.choice(data[data > floor], size=len(outliers[0]))

    nb.Nifti1Image(data, img.affine, img.header).to_filename(out_file)
    return out_file
def image_gradient(in_file, snr, out_file=None):
    """Compute the gaussian gradient magnitude of an image using numpy/scipy.

    Intensities and the resulting gradient are both rescaled so their
    99.5th percentile maps to 100. ``snr`` is accepted for interface
    compatibility but is not used in the computation.
    """
    import os.path as op
    import nibabel as nb
    import numpy as np
    from scipy.ndimage import gaussian_gradient_magnitude as gradient

    if out_file is None:
        basename, extension = op.splitext(op.basename(in_file))
        if extension == ".gz":
            basename, inner_ext = op.splitext(basename)
            extension = inner_ext + extension
        out_file = op.abspath(f"{basename}_grad{extension}")

    img = nb.load(in_file)
    data = img.get_data().astype(np.float32)  # pylint: disable=no-member
    # Normalize intensities so the 99.5th percentile maps to 100
    data *= 100 / np.percentile(data.reshape(-1), 99.5)
    grad = gradient(data, 3.0)
    # Rescale the gradient the same way (scale factor computed before scaling)
    gradmax = np.percentile(grad.reshape(-1), 99.5)
    grad *= 100.0
    grad /= gradmax
    nb.Nifti1Image(grad, img.affine, img.header).to_filename(out_file)
    return out_file
def gradient_threshold(in_file, in_segm, thresh=1.0, out_file=None):
    """ Compute a threshold from the histogram of the magnitude gradient image

    Binarizes the gradient image at a fixed cutoff, merges in a dilated brain
    segmentation, then keeps only the two largest connected components and
    fills holes. Returns the path of the written uint8 mask.

    NOTE(review): the ``thresh`` parameter is never used — the cutoff is
    hard-coded to 15.0 below; ``artmsk`` is computed but never saved.
    """
    import os.path as op
    import nibabel as nb
    import numpy as np
    from scipy import ndimage as sim
    # 3D structuring element used by all morphological operations below
    struc = sim.iterate_structure(sim.generate_binary_structure(3, 2), 2)
    if out_file is None:
        fname, ext = op.splitext(op.basename(in_file))
        if ext == ".gz":
            fname, ext2 = op.splitext(fname)
            ext = ext2 + ext
        out_file = op.abspath(f"{fname}_gradmask{ext}")
    imnii = nb.load(in_file)
    hdr = imnii.header.copy()
    hdr.set_data_dtype(np.uint8)  # pylint: disable=no-member
    data = imnii.get_data().astype(np.float32)
    # Fixed gradient-magnitude cutoff (gradient images are rescaled to ~[0, 100])
    mask = np.zeros_like(data, dtype=np.uint8)  # pylint: disable=no-member
    mask[data > 15.0] = 1
    # Union with the (binarized, dilated) brain segmentation
    segdata = nb.load(in_segm).get_data().astype(np.uint8)
    segdata[segdata > 0] = 1
    segdata = sim.binary_dilation(segdata, struc, iterations=2, border_value=1).astype(
        np.uint8
    )
    mask[segdata > 0] = 1
    mask = sim.binary_closing(mask, struc, iterations=2).astype(np.uint8)
    # Remove small objects
    label_im, nb_labels = sim.label(mask)
    artmsk = np.zeros_like(mask)
    if nb_labels > 2:
        # Keep only the two largest components (by voxel count)
        sizes = sim.sum(mask, label_im, list(range(nb_labels + 1)))
        ordered = list(reversed(sorted(zip(sizes, list(range(nb_labels + 1))))))
        for _, label in ordered[2:]:
            mask[label_im == label] = 0
            artmsk[label_im == label] = 1
    mask = sim.binary_fill_holes(mask, struc).astype(
        np.uint8
    )  # pylint: disable=no-member
    nb.Nifti1Image(mask, imnii.affine, hdr).to_filename(out_file)
    return out_file
def _get_imgtype(in_file):
from pathlib import Path
return int(Path(in_file).name.rstrip(".gz").rstrip(".nii").split("_")[-1][1])
def _get_mod(in_file):
from pathlib import Path
return Path(in_file).name.rstrip(".gz").rstrip(".nii").split("_")[-1]
| bsd-3-clause | d517a0bae12e30ec252dd6a28e6be64e | 31.569781 | 98 | 0.55057 | 3.501736 | false | false | false | false |
poldracklab/mriqc | mriqc/viz/utils.py | 1 | 18968 | # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
#
# Copyright 2021 The NiPreps Developers <nipreps@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# We support and encourage derived works from this project, please read
# about our expectations at
#
# https://www.nipreps.org/community/licensing/
#
"""Visualization utilities."""
import math
import os.path as op
import matplotlib.pyplot as plt
import nibabel as nb
import numpy as np
import seaborn as sns
from matplotlib.backends.backend_pdf import FigureCanvasPdf as FigureCanvas
from matplotlib.gridspec import GridSpec
DEFAULT_DPI = 300
DINA4_LANDSCAPE = (11.69, 8.27)
DINA4_PORTRAIT = (8.27, 11.69)
def plot_slice(
    dslice,
    spacing=None,
    cmap="Greys_r",
    label=None,
    ax=None,
    vmax=None,
    vmin=None,
    annotate=False,
):
    """Render one 2D slice into a matplotlib axes and return the axes.

    The slice is drawn with physical extent derived from ``spacing``;
    ``annotate`` adds R/L orientation letters, ``label`` a caption in the
    lower-right corner. Display limits default to robust percentiles
    estimated by :func:`_get_limits`.

    NOTE(review): ``if not vmin`` treats an explicit ``vmin=0`` as unset.
    """
    from matplotlib.cm import get_cmap
    if isinstance(cmap, (str, bytes)):
        cmap = get_cmap(cmap)
    est_vmin, est_vmax = _get_limits(dslice)
    if not vmin:
        vmin = est_vmin
    if not vmax:
        vmax = est_vmax
    if ax is None:
        ax = plt.gca()
    if spacing is None:
        spacing = [1.0, 1.0]
    # Physical size of the slice in mm (voxel spacing times matrix size)
    phys_sp = np.array(spacing) * dslice.shape
    ax.imshow(
        np.swapaxes(dslice, 0, 1),
        vmin=vmin,
        vmax=vmax,
        cmap=cmap,
        interpolation="nearest",
        origin="lower",
        extent=[0, phys_sp[0], 0, phys_sp[1]],
    )
    ax.set_xticklabels([])
    ax.set_yticklabels([])
    ax.grid(False)
    ax.axis("off")
    # Use extreme colormap entries so annotations contrast with the image
    bgcolor = cmap(min(vmin, 0.0))
    fgcolor = cmap(vmax)
    if annotate:
        ax.text(
            0.95,
            0.95,
            "R",
            color=fgcolor,
            transform=ax.transAxes,
            horizontalalignment="center",
            verticalalignment="top",
            size=18,
            bbox=dict(boxstyle="square,pad=0", ec=bgcolor, fc=bgcolor),
        )
        ax.text(
            0.05,
            0.95,
            "L",
            color=fgcolor,
            transform=ax.transAxes,
            horizontalalignment="center",
            verticalalignment="top",
            size=18,
            bbox=dict(boxstyle="square,pad=0", ec=bgcolor, fc=bgcolor),
        )
    if label is not None:
        ax.text(
            0.98,
            0.01,
            label,
            color=fgcolor,
            transform=ax.transAxes,
            horizontalalignment="right",
            verticalalignment="bottom",
            size=18,
            bbox=dict(boxstyle="square,pad=0", ec=bgcolor, fc=bgcolor),
        )
    return ax
def plot_slice_tern(
    dslice,
    prev=None,
    post=None,
    spacing=None,
    cmap="Greys_r",
    label=None,
    ax=None,
    vmax=None,
    vmin=None,
):
    """Plot a slice flanked by its previous/next timepoints in a single image.

    ``prev`` and ``post`` default to all-ones slices when missing (first or
    last timepoint). Used by :func:`plot_spikes`. Returns ``None``.

    NOTE(review): ``if not vmin`` treats an explicit ``vmin=0`` as unset.
    """
    from matplotlib.cm import get_cmap
    if isinstance(cmap, (str, bytes)):
        cmap = get_cmap(cmap)
    est_vmin, est_vmax = _get_limits(dslice)
    if not vmin:
        vmin = est_vmin
    if not vmax:
        vmax = est_vmax
    if ax is None:
        ax = plt.gca()
    if spacing is None:
        spacing = [1.0, 1.0]
    else:
        # Swap to (y, x) ordering for the horizontal triptych layout
        spacing = [spacing[1], spacing[0]]
    phys_sp = np.array(spacing) * dslice.shape
    if prev is None:
        prev = np.ones_like(dslice)
    if post is None:
        post = np.ones_like(dslice)
    # Stack prev | current | post horizontally into one array
    combined = np.swapaxes(np.vstack((prev, dslice, post)), 0, 1)
    ax.imshow(
        combined,
        vmin=vmin,
        vmax=vmax,
        cmap=cmap,
        interpolation="nearest",
        origin="lower",
        extent=[0, phys_sp[1] * 3, 0, phys_sp[0]],
    )
    ax.set_xticklabels([])
    ax.set_yticklabels([])
    ax.grid(False)
    if label is not None:
        ax.text(
            0.5,
            0.05,
            label,
            transform=ax.transAxes,
            horizontalalignment="center",
            verticalalignment="top",
            size=14,
            bbox=dict(boxstyle="square,pad=0", ec="k", fc="k"),
            color="w",
        )
def plot_spikes(
    in_file,
    in_fft,
    spikes_list,
    cols=3,
    labelfmt="t={0:.3f}s (z={1:d})",
    out_file=None,
):
    """Plot each detected spike (t, z) alongside its FFT, saving an SVG.

    For every spike, the affected slice and its temporal neighbors are shown
    (top row) together with the corresponding FFT slices (bottom row).
    Returns the path of the written SVG file.
    """
    from mpl_toolkits.axes_grid1 import make_axes_locatable
    nii = nb.as_closest_canonical(nb.load(in_file))
    fft = nb.load(in_fft).get_data()
    data = nii.get_data()
    zooms = nii.header.get_zooms()[:2]
    tstep = nii.header.get_zooms()[-1]
    ntpoints = data.shape[-1]
    # Widen the grid when there are many spikes to show
    if len(spikes_list) > cols * 7:
        cols += 1
    nspikes = len(spikes_list)
    rows = 1
    if nspikes > cols:
        rows = math.ceil(nspikes / cols)
    fig = plt.figure(figsize=(7 * cols, 5 * rows))
    for i, (t, z) in enumerate(spikes_list):
        # Neighboring timepoints (None at the temporal boundaries)
        prev = None
        pvft = None
        if t > 0:
            prev = data[..., z, t - 1]
            pvft = fft[..., z, t - 1]
        post = None
        psft = None
        if t < (ntpoints - 1):
            post = data[..., z, t + 1]
            psft = fft[..., z, t + 1]
        # Stack the image triptych above the FFT triptych in one cell
        ax1 = fig.add_subplot(rows, cols, i + 1)
        divider = make_axes_locatable(ax1)
        ax2 = divider.new_vertical(size="100%", pad=0.1)
        fig.add_axes(ax2)
        plot_slice_tern(
            data[..., z, t],
            prev=prev,
            post=post,
            spacing=zooms,
            ax=ax2,
            label=labelfmt.format(t * tstep, z),
        )
        plot_slice_tern(
            fft[..., z, t],
            prev=pvft,
            post=psft,
            vmin=-5,
            vmax=5,
            cmap=get_parula(),
            ax=ax1,
        )
    plt.tight_layout()
    if out_file is None:
        fname, ext = op.splitext(op.basename(in_file))
        if ext == ".gz":
            fname, _ = op.splitext(fname)
        out_file = op.abspath("%s.svg" % fname)
    fig.savefig(out_file, format="svg", dpi=300, bbox_inches="tight")
    return out_file
def plot_mosaic(
    img,
    out_file=None,
    ncols=8,
    title=None,
    overlay_mask=None,
    bbox_mask_file=None,
    only_plot_noise=False,
    annotate=True,
    vmin=None,
    vmax=None,
    cmap="Greys_r",
    plot_sagittal=True,
    fig=None,
    zmax=128,
):
    """Plot a mosaic of axial (and optionally sagittal) slices of an image.

    ``img`` may be a file path or an array (arrays use unit spacing and are
    written to ``mosaic.svg``). The number of axial slices is capped at
    ``zmax`` by cropping low-signal ends and decimating. Returns the path of
    the saved SVG.
    """
    if isinstance(img, (str, bytes)):
        nii = nb.as_closest_canonical(nb.load(img))
        img_data = nii.get_data()
        zooms = nii.header.get_zooms()
    else:
        img_data = img
        zooms = [1.0, 1.0, 1.0]
        out_file = "mosaic.svg"
    # Remove extra dimensions
    img_data = np.squeeze(img_data)
    # Without an explicit bounding box, crop to above-background voxels
    if img_data.shape[2] > zmax and bbox_mask_file is None:
        lowthres = np.percentile(img_data, 5)
        mask_file = np.ones_like(img_data)
        mask_file[img_data <= lowthres] = 0
        img_data = _bbox(img_data, mask_file)
    if bbox_mask_file is not None:
        bbox_data = nb.as_closest_canonical(nb.load(bbox_mask_file)).get_data()
        img_data = _bbox(img_data, bbox_data)
    z_vals = np.array(list(range(0, img_data.shape[2])))
    # Reduce the number of slices shown
    if len(z_vals) > zmax:
        rem = 15
        # Crop inferior and posterior
        if not bbox_mask_file:
            # img_data = img_data[..., rem:-rem]
            z_vals = z_vals[rem:-rem]
        else:
            # img_data = img_data[..., 2 * rem:]
            start_index = 2 * rem
            z_vals = z_vals[start_index:]
    while len(z_vals) > zmax:
        # Discard one every two slices
        # img_data = img_data[..., ::2]
        z_vals = z_vals[::2]
    n_images = len(z_vals)
    nrows = math.ceil(n_images / ncols)
    if plot_sagittal:
        nrows += 1
    if overlay_mask:
        overlay_data = nb.as_closest_canonical(nb.load(overlay_mask)).get_data()
    # create figures
    if fig is None:
        fig = plt.figure(figsize=(22, nrows * 3))
    est_vmin, est_vmax = _get_limits(img_data, only_plot_noise=only_plot_noise)
    if not vmin:
        vmin = est_vmin
    if not vmax:
        vmax = est_vmax
    naxis = 1
    # Axial slices, one subplot per retained z index
    for z_val in z_vals:
        ax = fig.add_subplot(nrows, ncols, naxis)
        if overlay_mask:
            ax.set_rasterized(True)
        plot_slice(
            img_data[:, :, z_val],
            vmin=vmin,
            vmax=vmax,
            cmap=cmap,
            ax=ax,
            spacing=zooms[:2],
            label="%d" % z_val,
            annotate=annotate,
        )
        if overlay_mask:
            from matplotlib import cm
            msk_cmap = cm.Reds  # @UndefinedVariable
            # Make the overlay colormap progressively transparent
            msk_cmap._init()
            alphas = np.linspace(0, 0.75, msk_cmap.N + 3)
            msk_cmap._lut[:, -1] = alphas
            plot_slice(
                overlay_data[:, :, z_val],
                vmin=0,
                vmax=1,
                cmap=msk_cmap,
                ax=ax,
                spacing=zooms[:2],
            )
        naxis += 1
    if plot_sagittal:
        # Last row: evenly spaced sagittal slices
        naxis = ncols * (nrows - 1) + 1
        step = int(img_data.shape[0] / (ncols + 1))
        start = step
        stop = img_data.shape[0] - step
        if step == 0:
            step = 1
        for x_val in list(range(start, stop, step))[:ncols]:
            ax = fig.add_subplot(nrows, ncols, naxis)
            plot_slice(
                img_data[x_val, ...],
                vmin=vmin,
                vmax=vmax,
                cmap=cmap,
                ax=ax,
                label="%d" % x_val,
                spacing=[zooms[0], zooms[2]],
            )
            naxis += 1
    fig.subplots_adjust(
        left=0.05, right=0.95, bottom=0.05, top=0.95, wspace=0.05, hspace=0.05
    )
    if title:
        fig.suptitle(title, fontsize="10")
    fig.subplots_adjust(wspace=0.002, hspace=0.002)
    if out_file is None:
        fname, ext = op.splitext(op.basename(img))
        if ext == ".gz":
            fname, _ = op.splitext(fname)
        out_file = op.abspath(fname + "_mosaic.svg")
    fig.savefig(out_file, format="svg", dpi=300, bbox_inches="tight")
    return out_file
def plot_fd(fd_file, fd_radius, mean_fd_dist=None, figsize=DINA4_LANDSCAPE):
    """Plot the framewise-displacement timeseries with a marginal histogram.

    When ``mean_fd_dist`` is given, a second row shows the distribution of
    mean FD over all subjects with this subject's mean marked. Returns the
    matplotlib figure.
    """
    fd_power = _calc_fd(fd_file, fd_radius)
    fig = plt.Figure(figsize=figsize)
    FigureCanvas(fig)
    if mean_fd_dist:
        grid = GridSpec(2, 4)
    else:
        grid = GridSpec(1, 2, width_ratios=[3, 1])
        grid.update(hspace=1.0, right=0.95, left=0.1, bottom=0.2)
    # Timeseries panel
    ax = fig.add_subplot(grid[0, :-1])
    ax.plot(fd_power)
    ax.set_xlim((0, len(fd_power)))
    ax.set_ylabel("Frame Displacement [mm]")
    ax.set_xlabel("Frame number")
    ylim = ax.get_ylim()
    # Marginal distribution sharing the y-range of the timeseries
    ax = fig.add_subplot(grid[0, -1])
    sns.distplot(fd_power, vertical=True, ax=ax)
    ax.set_ylim(ylim)
    if mean_fd_dist:
        ax = fig.add_subplot(grid[1, :])
        sns.distplot(mean_fd_dist, ax=ax)
        ax.set_xlabel("Mean Frame Displacement (over all subjects) [mm]")
        mean_fd = fd_power.mean()
        label = r"$\overline{{\text{{FD}}}}$ = {0:g}".format(mean_fd)
        plot_vline(mean_fd, label, ax=ax)
    return fig
def plot_dist(
    main_file,
    mask_file,
    xlabel,
    distribution=None,
    xlabel2=None,
    figsize=DINA4_LANDSCAPE,
):
    """Plot the in-mask voxel histogram above a reference distribution.

    The top panel shows the histogram of ``main_file`` values inside
    ``mask_file``; the bottom panel shows ``distribution`` with this
    subject's median marked. Returns the matplotlib figure.
    """
    data = _get_values_inside_a_mask(main_file, mask_file)
    fig = plt.Figure(figsize=figsize)
    FigureCanvas(fig)
    gsp = GridSpec(2, 1)
    ax = fig.add_subplot(gsp[0, 0])
    sns.distplot(data.astype(np.double), kde=False, bins=100, ax=ax)
    ax.set_xlabel(xlabel)
    ax = fig.add_subplot(gsp[1, 0])
    sns.distplot(np.array(distribution).astype(np.double), ax=ax)
    # Mark where this subject's median falls within the reference distribution
    cur_val = np.median(data)
    label = "{0!g}".format(cur_val)
    plot_vline(cur_val, label, ax=ax)
    ax.set_xlabel(xlabel2)
    return fig
def plot_vline(cur_val, label, ax):
    """Draw a vertical line at ``cur_val`` on ``ax`` and annotate it with ``label``.

    The label is centered vertically and nudged left of the line by 1% of
    the x-axis span.
    """
    ax.axvline(cur_val)
    y_low, y_high = ax.get_ylim()
    x_low, x_high = ax.get_xlim()
    ax.text(
        cur_val - (x_low + x_high) / 100.0,
        (y_low + y_high) / 2.0,
        label,
        color="blue",
        rotation=90,
        verticalalignment="center",
        horizontalalignment="right",
    )
def _calc_rows_columns(ratio, n_images):
rows = 2
for _ in range(100):
columns = math.floor(ratio * rows)
total = (rows - 1) * columns
if total > n_images:
rows = np.ceil(n_images / columns) + 1
break
rows += 1
return int(rows), int(columns)
def _calc_fd(fd_file, fd_radius):
from math import pi
lines = open(fd_file, "r").readlines()
rows = [[float(x) for x in line.split()] for line in lines]
cols = np.array([list(col) for col in zip(*rows)])
translations = np.transpose(np.abs(np.diff(cols[0:3, :])))
rotations = np.transpose(np.abs(np.diff(cols[3:6, :])))
fd_power = np.sum(translations, axis=1) + (fd_radius * pi / 180) * np.sum(
rotations, axis=1
)
# FD is zero for the first time point
fd_power = np.insert(fd_power, 0, 0)
return fd_power
def _get_mean_fd_distribution(fd_files, fd_radius):
    """Return (mean FDs, max FDs), one value per file in ``fd_files``."""
    timeseries = [_calc_fd(fname, fd_radius) for fname in fd_files]
    return [ts.mean() for ts in timeseries], [ts.max() for ts in timeseries]
def _get_values_inside_a_mask(main_file, mask_file):
    """Return the non-NaN voxel values of ``main_file`` inside ``mask_file``."""
    image_data = nb.load(main_file).get_data()
    selection = np.logical_and(
        np.logical_not(np.isnan(image_data)),
        nb.load(mask_file).get_data() > 0,
    )
    return image_data[selection]
def plot_segmentation(anat_file, segmentation, out_file, **kwargs):
    """Overlay segmentation contours on an anatomical image and save it.

    Recognized keyword arguments: ``vmin``/``vmax`` (display limits),
    ``saturate`` (clip vmax at the 70th percentile), ``display_mode``,
    ``cut_coords``, ``title``, ``levels`` and ``colors`` for the contours.
    Returns ``out_file``.
    """
    from nilearn.plotting import plot_anat
    vmax = kwargs.get("vmax")
    vmin = kwargs.get("vmin")
    # Saturation overrides any vmax with a low percentile of the image
    if kwargs.get("saturate", False):
        vmax = np.percentile(nb.load(anat_file).get_data().reshape(-1), 70)
    # If neither limit was supplied, estimate both from robust percentiles
    if vmax is None and vmin is None:
        vmin = np.percentile(nb.load(anat_file).get_data().reshape(-1), 10)
        vmax = np.percentile(nb.load(anat_file).get_data().reshape(-1), 99)
    disp = plot_anat(
        anat_file,
        display_mode=kwargs.get("display_mode", "ortho"),
        cut_coords=kwargs.get("cut_coords", 8),
        title=kwargs.get("title"),
        vmax=vmax,
        vmin=vmin,
    )
    disp.add_contours(
        segmentation,
        levels=kwargs.get("levels", [1]),
        colors=kwargs.get("colors", "r"),
    )
    disp.savefig(out_file)
    disp.close()
    disp = None
    return out_file
def _get_limits(nifti_file, only_plot_noise=False):
if isinstance(nifti_file, str):
nii = nb.as_closest_canonical(nb.load(nifti_file))
data = nii.get_data()
else:
data = nifti_file
data_mask = np.logical_not(np.isnan(data))
if only_plot_noise:
data_mask = np.logical_and(data_mask, data != 0)
vmin = np.percentile(data[data_mask], 0)
vmax = np.percentile(data[data_mask], 61)
else:
vmin = np.percentile(data[data_mask], 0.5)
vmax = np.percentile(data[data_mask], 99.5)
return vmin, vmax
def _bbox(img_data, bbox_data):
B = np.argwhere(bbox_data)
(ystart, xstart, zstart), (ystop, xstop, zstop) = B.min(0), B.max(0) + 1
return img_data[ystart:ystop, xstart:xstop, zstart:zstop]
def get_parula():
    """Return a matplotlib colormap reproducing MATLAB's "parula".

    The RGB anchor points below are interpolated linearly by
    ``LinearSegmentedColormap.from_list``.
    """
    from matplotlib.colors import LinearSegmentedColormap
    cm_data = [
        [0.2081, 0.1663, 0.5292],
        [0.2116238095, 0.1897809524, 0.5776761905],
        [0.212252381, 0.2137714286, 0.6269714286],
        [0.2081, 0.2386, 0.6770857143],
        [0.1959047619, 0.2644571429, 0.7279],
        [0.1707285714, 0.2919380952, 0.779247619],
        [0.1252714286, 0.3242428571, 0.8302714286],
        [0.0591333333, 0.3598333333, 0.8683333333],
        [0.0116952381, 0.3875095238, 0.8819571429],
        [0.0059571429, 0.4086142857, 0.8828428571],
        [0.0165142857, 0.4266, 0.8786333333],
        [0.032852381, 0.4430428571, 0.8719571429],
        [0.0498142857, 0.4585714286, 0.8640571429],
        [0.0629333333, 0.4736904762, 0.8554380952],
        [0.0722666667, 0.4886666667, 0.8467],
        [0.0779428571, 0.5039857143, 0.8383714286],
        [0.079347619, 0.5200238095, 0.8311809524],
        [0.0749428571, 0.5375428571, 0.8262714286],
        [0.0640571429, 0.5569857143, 0.8239571429],
        [0.0487714286, 0.5772238095, 0.8228285714],
        [0.0343428571, 0.5965809524, 0.819852381],
        [0.0265, 0.6137, 0.8135],
        [0.0238904762, 0.6286619048, 0.8037619048],
        [0.0230904762, 0.6417857143, 0.7912666667],
        [0.0227714286, 0.6534857143, 0.7767571429],
        [0.0266619048, 0.6641952381, 0.7607190476],
        [0.0383714286, 0.6742714286, 0.743552381],
        [0.0589714286, 0.6837571429, 0.7253857143],
        [0.0843, 0.6928333333, 0.7061666667],
        [0.1132952381, 0.7015, 0.6858571429],
        [0.1452714286, 0.7097571429, 0.6646285714],
        [0.1801333333, 0.7176571429, 0.6424333333],
        [0.2178285714, 0.7250428571, 0.6192619048],
        [0.2586428571, 0.7317142857, 0.5954285714],
        [0.3021714286, 0.7376047619, 0.5711857143],
        [0.3481666667, 0.7424333333, 0.5472666667],
        [0.3952571429, 0.7459, 0.5244428571],
        [0.4420095238, 0.7480809524, 0.5033142857],
        [0.4871238095, 0.7490619048, 0.4839761905],
        [0.5300285714, 0.7491142857, 0.4661142857],
        [0.5708571429, 0.7485190476, 0.4493904762],
        [0.609852381, 0.7473142857, 0.4336857143],
        [0.6473, 0.7456, 0.4188],
        [0.6834190476, 0.7434761905, 0.4044333333],
        [0.7184095238, 0.7411333333, 0.3904761905],
        [0.7524857143, 0.7384, 0.3768142857],
        [0.7858428571, 0.7355666667, 0.3632714286],
        [0.8185047619, 0.7327333333, 0.3497904762],
        [0.8506571429, 0.7299, 0.3360285714],
        [0.8824333333, 0.7274333333, 0.3217],
        [0.9139333333, 0.7257857143, 0.3062761905],
        [0.9449571429, 0.7261142857, 0.2886428571],
        [0.9738952381, 0.7313952381, 0.266647619],
        [0.9937714286, 0.7454571429, 0.240347619],
        [0.9990428571, 0.7653142857, 0.2164142857],
        [0.9955333333, 0.7860571429, 0.196652381],
        [0.988, 0.8066, 0.1793666667],
        [0.9788571429, 0.8271428571, 0.1633142857],
        [0.9697, 0.8481380952, 0.147452381],
        [0.9625857143, 0.8705142857, 0.1309],
        [0.9588714286, 0.8949, 0.1132428571],
        [0.9598238095, 0.9218333333, 0.0948380952],
        [0.9661, 0.9514428571, 0.0755333333],
        [0.9763, 0.9831, 0.0538],
    ]
    return LinearSegmentedColormap.from_list("parula", cm_data)
| bsd-3-clause | a61b2d50932af6d0da472f151766d822 | 26.935199 | 80 | 0.561683 | 2.964676 | false | false | false | false |
poldracklab/mriqc | mriqc/bin/labeler.py | 2 | 3116 | # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
#
# Copyright 2021 The NiPreps Developers <nipreps@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# We support and encourage derived works from this project, please read
# about our expectations at
#
# https://www.nipreps.org/community/licensing/
#
import csv
import os
import random
import sys
import webbrowser
import numpy as np
def num_rows(data):
    """Return the index (1-3) of the first empty rating column, or 4 if all are filled."""
    for column in (1, 2, 3):
        if not data[column]:
            return column
    return 4
def main():
    """Interactively label ABIDE images listed in ``sinfo.csv``.

    Reads the CSV, reports per-rater completion statistics, then walks the
    rows in random order asking the user to grade each unrated image
    (opening it in a web browser).  Labels are written back to
    ``sinfo.csv`` at the end.
    """
    print("Reading file sinfo.csv")
    # The csv module on Python 3 needs text mode with newline="";
    # the previous "rb" mode handed bytes to csv.reader, which fails.
    with open("sinfo.csv", newline="") as csvfile:
        rows = list(csv.reader(csvfile))
    # display statistics
    finished = [0.0, 0.0, 0.0]
    hold = np.zeros((3, len(rows) - 1))
    hold[:] = np.nan
    total = 601  # expected number of participants
    for i in range(1, len(rows)):
        for j in range(1, 4):
            if len(rows[i][j]) > 0:
                finished[j - 1] = finished[j - 1] + 1
                hold[j - 1, i - 1] = int(rows[i][j])
    finished = np.divide(np.round(np.divide(finished, total) * 1000), 10)
    print(f"Completed: {' '.join(['%g%%' % f for f in finished])}")
    print(f"Total: {np.round(np.divide(np.sum(finished), 3))}%")
    input("Waiting: [enter]")
    # rows[1:] are all the data rows; random.shuffle needs a mutable
    # sequence, so materialize the (immutable) range into a list.
    order = list(range(1, len(rows)))
    random.shuffle(order)
    # pick a random row
    for row in order:
        # check how many entries it has
        curEnt = num_rows(rows[row])
        if curEnt <= 1:
            # if less than 1, run the row
            print("Check participant #" + rows[row][0])
            fname = os.getcwd() + "/abide/" + rows[row][0]
            if os.path.isfile(fname):
                webbrowser.open("file://" + fname)
                quality = input("Quality? [-1/0/1/e/c] ")
                if quality == "e":
                    break
                if quality == "c":
                    print("Current comment: " + rows[row][4])
                    comment = input("Comment: ")
                    if len(comment) > 0:
                        rows[row][4] = comment
                    quality = input("Quality? [-1/0/1/e] ")
                    if quality == "e":
                        break
                rows[row][curEnt] = quality
            else:
                print("File does not exist")
    print("Writing file sinfo.csv")
    # Text mode + newline="" again; "wb" would make csv.writer raise on str.
    with open("sinfo.csv", "w", newline="") as outfile:
        csv.writer(outfile).writerows(rows)
    print("Ending")
# Allow running the labeler directly as a script.
if __name__ == "__main__":
    main()
    sys.exit(0)
| bsd-3-clause | 03b63710d6510bbf0bbdbada91260c45 | 30.795918 | 74 | 0.563864 | 3.548975 | false | false | false | false |
scrapy/parsel | parsel/selector.py | 1 | 21147 | """
XPath selectors based on lxml
"""
import typing
import warnings
from typing import (
Any,
Dict,
List,
Mapping,
Optional,
Pattern,
Type,
TypeVar,
Union,
)
from warnings import warn
from cssselect import GenericTranslator as OriginalGenericTranslator
from lxml import etree, html
from packaging.version import Version
from .csstranslator import GenericTranslator, HTMLTranslator
from .utils import extract_regex, flatten, iflatten, shorten
if typing.TYPE_CHECKING:
    # both require Python 3.8
    from typing import Literal, SupportsIndex
    # simplified _OutputMethodArg from types-lxml
    _TostringMethodType = Literal[
        "html",
        "xml",
    ]
# Type variable bound to Selector so SelectorList subclasses stay typed.
_SelectorType = TypeVar("_SelectorType", bound="Selector")
_ParserType = Union[etree.XMLParser, etree.HTMLParser]
# lxml >= 4.2 is required for the huge_tree parser option.
lxml_version = Version(etree.__version__)
lxml_huge_tree_version = Version("4.2")
LXML_SUPPORTS_HUGE_TREE = lxml_version >= lxml_huge_tree_version
class CannotRemoveElementWithoutRoot(Exception):
    """Raised when removing/dropping a node that has no root element
    (e.g. a pseudo-element selection such as ``::text``)."""
    pass
class CannotRemoveElementWithoutParent(Exception):
    """Raised when removing a node that has no parent
    (e.g. the root element itself)."""
    pass
class CannotDropElementWithoutParent(CannotRemoveElementWithoutParent):
    """Raised by ``drop()`` when the node has no parent element."""
    pass
class SafeXMLParser(etree.XMLParser):
    """XML parser with entity resolution disabled by default, mitigating
    XML entity expansion / external entity (XXE) attacks."""
    def __init__(self, *args, **kwargs) -> None:
        # Callers may still opt in explicitly with resolve_entities=True.
        kwargs.setdefault("resolve_entities", False)
        super().__init__(*args, **kwargs)
# Per-type configuration: parser class, CSS-to-XPath translator and the
# serialization method passed to lxml.etree.tostring.
_ctgroup = {
    "html": {
        "_parser": html.HTMLParser,
        "_csstranslator": HTMLTranslator(),
        "_tostring_method": "html",
    },
    "xml": {
        "_parser": SafeXMLParser,
        "_csstranslator": GenericTranslator(),
        "_tostring_method": "xml",
    },
}
def _st(st: Optional[str]) -> str:
    """Normalize a selector type: ``None`` defaults to ``"html"``; any other
    value must be a key of ``_ctgroup`` or :exc:`ValueError` is raised."""
    if st is None:
        return "html"
    if st in _ctgroup:
        return st
    raise ValueError(f"Invalid type: {st}")
def create_root_node(
    text: str,
    parser_cls: Type[_ParserType],
    base_url: Optional[str] = None,
    huge_tree: bool = LXML_SUPPORTS_HUGE_TREE,
) -> etree._Element:
    """Create root node for text using given parser class."""
    # NUL bytes make lxml fail; an empty body is replaced by a minimal
    # document so parsing always yields a root element.
    body = text.strip().replace("\x00", "").encode("utf8") or b"<html/>"
    if huge_tree and LXML_SUPPORTS_HUGE_TREE:
        parser = parser_cls(recover=True, encoding="utf8", huge_tree=True)
        # the stub wrongly thinks base_url can't be None
        root = etree.fromstring(body, parser=parser, base_url=base_url)  # type: ignore[arg-type]
    else:
        parser = parser_cls(recover=True, encoding="utf8")
        root = etree.fromstring(body, parser=parser, base_url=base_url)  # type: ignore[arg-type]
        # Surface a hint when input was truncated because this lxml build
        # lacks huge_tree support.
        for error in parser.error_log:
            if "use XML_PARSE_HUGE option" in error.message:
                warnings.warn(
                    f"Input data is too big. Upgrade to lxml "
                    f"{lxml_huge_tree_version} or later for huge_tree support."
                )
    if root is None:
        # recover=True can still produce None for pathological input.
        root = etree.fromstring(b"<html/>", parser=parser, base_url=base_url)
    return root
class SelectorList(List[_SelectorType]):
    """
    The :class:`SelectorList` class is a subclass of the builtin ``list``
    class, which provides a few additional methods.
    """
    @typing.overload
    def __getitem__(self, pos: "SupportsIndex") -> _SelectorType:
        pass
    @typing.overload
    def __getitem__(self, pos: slice) -> "SelectorList[_SelectorType]":
        pass
    def __getitem__(
        self, pos: Union["SupportsIndex", slice]
    ) -> Union[_SelectorType, "SelectorList[_SelectorType]"]:
        # Slices are re-wrapped so they keep the SelectorList type instead
        # of degrading to a plain list.
        o = super().__getitem__(pos)
        if isinstance(pos, slice):
            return self.__class__(
                typing.cast("SelectorList[_SelectorType]", o)
            )
        else:
            return typing.cast(_SelectorType, o)
    def __getstate__(self) -> None:
        # Selectors wrap live lxml nodes, which cannot be pickled.
        raise TypeError("can't pickle SelectorList objects")
    def xpath(
        self,
        xpath: str,
        namespaces: Optional[Mapping[str, str]] = None,
        **kwargs,
    ) -> "SelectorList[_SelectorType]":
        """
        Call the ``.xpath()`` method for each element in this list and return
        their results flattened as another :class:`SelectorList`.
        ``query`` is the same argument as the one in :meth:`Selector.xpath`
        ``namespaces`` is an optional ``prefix: namespace-uri`` mapping (dict)
        for additional prefixes to those registered with ``register_namespace(prefix, uri)``.
        Contrary to ``register_namespace()``, these prefixes are not
        saved for future calls.
        Any additional named arguments can be used to pass values for XPath
        variables in the XPath expression, e.g.::
            selector.xpath('//a[href=$url]', url="http://www.example.com")
        """
        return self.__class__(
            flatten(
                [x.xpath(xpath, namespaces=namespaces, **kwargs) for x in self]
            )
        )
    def css(self, query: str) -> "SelectorList[_SelectorType]":
        """
        Call the ``.css()`` method for each element in this list and return
        their results flattened as another :class:`SelectorList`.
        ``query`` is the same argument as the one in :meth:`Selector.css`
        """
        return self.__class__(flatten([x.css(query) for x in self]))
    def re(
        self, regex: Union[str, Pattern[str]], replace_entities: bool = True
    ) -> List[str]:
        """
        Call the ``.re()`` method for each element in this list and return
        their results flattened, as a list of strings.
        By default, character entity references are replaced by their
        corresponding character (except for ``&`` and ``<``).
        Passing ``replace_entities`` as ``False`` switches off these
        replacements.
        """
        return flatten(
            [x.re(regex, replace_entities=replace_entities) for x in self]
        )
    @typing.overload
    def re_first(
        self,
        regex: Union[str, Pattern[str]],
        default: None = None,
        replace_entities: bool = True,
    ) -> Optional[str]:
        pass
    @typing.overload
    def re_first(
        self,
        regex: Union[str, Pattern[str]],
        default: str,
        replace_entities: bool = True,
    ) -> str:
        pass
    def re_first(
        self,
        regex: Union[str, Pattern[str]],
        default: Optional[str] = None,
        replace_entities: bool = True,
    ) -> Optional[str]:
        """
        Call the ``.re()`` method for the first element in this list and
        return the result in a string. If the list is empty or the
        regex doesn't match anything, return the default value (``None`` if
        the argument is not provided).
        By default, character entity references are replaced by their
        corresponding character (except for ``&`` and ``<``).
        Passing ``replace_entities`` as ``False`` switches off these
        replacements.
        """
        # iflatten is lazy, so matching stops at the first result found.
        for el in iflatten(
            x.re(regex, replace_entities=replace_entities) for x in self
        ):
            return el
        return default
    def getall(self) -> List[str]:
        """
        Call the ``.get()`` method for each element in this list and return
        their results flattened, as a list of strings.
        """
        return [x.get() for x in self]
    # Backwards-compatible alias.
    extract = getall
    @typing.overload
    def get(self, default: None = None) -> Optional[str]:
        pass
    @typing.overload
    def get(self, default: str) -> str:
        pass
    def get(self, default: Optional[str] = None) -> Optional[str]:
        """
        Return the result of ``.get()`` for the first element in this list.
        If the list is empty, return the default value.
        """
        for x in self:
            return x.get()
        return default
    # Backwards-compatible alias.
    extract_first = get
    @property
    def attrib(self) -> Mapping[str, str]:
        """Return the attributes dictionary for the first element.
        If the list is empty, return an empty dict.
        """
        for x in self:
            return x.attrib
        return {}
    def remove(self) -> None:  # type: ignore[override]
        """
        Remove matched nodes from the parent for each element in this list.
        """
        warn(
            "Method parsel.selector.SelectorList.remove is deprecated, please use parsel.selector.SelectorList.drop method instead",
            category=DeprecationWarning,
            stacklevel=2,
        )
        for x in self:
            x.remove()
    def drop(self) -> None:
        """
        Drop matched nodes from the parent for each element in this list.
        """
        for x in self:
            x.drop()
class Selector:
    """
    :class:`Selector` allows you to select parts of an XML or HTML text using CSS
    or XPath expressions and extract data from it.
    ``text`` is a ``str`` object
    ``type`` defines the selector type, it can be ``"html"``, ``"xml"`` or ``None`` (default).
    If ``type`` is ``None``, the selector defaults to ``"html"``.
    ``base_url`` allows setting a URL for the document. This is needed when looking up external entities with relative paths.
    See the documentation for :func:`lxml.etree.fromstring` for more information.
    ``huge_tree`` controls the lxml/libxml2 feature that forbids parsing
    certain large documents to protect from possible memory exhaustion. The
    argument is ``True`` by default if the installed lxml version supports it,
    which disables the protection to allow parsing such documents. Set it to
    ``False`` if you want to enable the protection.
    See `this lxml FAQ entry <https://lxml.de/FAQ.html#is-lxml-vulnerable-to-xml-bombs>`_
    for more information.
    """
    __slots__ = [
        "text",
        "namespaces",
        "type",
        "_expr",
        "root",
        "__weakref__",
        "_parser",
        "_csstranslator",
        "_tostring_method",
    ]
    _default_type: Optional[str] = None
    _default_namespaces = {
        "re": "http://exslt.org/regular-expressions",
        # supported in libxslt:
        # set:difference
        # set:has-same-node
        # set:intersection
        # set:leading
        # set:trailing
        "set": "http://exslt.org/sets",
    }
    # lxml smart strings keep a reference to the whole tree; disabled here.
    _lxml_smart_strings = False
    selectorlist_cls = SelectorList["Selector"]
    def __init__(
        self,
        text: Optional[str] = None,
        type: Optional[str] = None,
        namespaces: Optional[Mapping[str, str]] = None,
        root: Optional[Any] = None,
        base_url: Optional[str] = None,
        _expr: Optional[str] = None,
        huge_tree: bool = LXML_SUPPORTS_HUGE_TREE,
    ) -> None:
        # Resolve the selector type and the matching parser/translator group.
        self.type = st = _st(type or self._default_type)
        self._parser: Type[_ParserType] = typing.cast(
            Type[_ParserType], _ctgroup[st]["_parser"]
        )
        self._csstranslator: OriginalGenericTranslator = typing.cast(
            OriginalGenericTranslator, _ctgroup[st]["_csstranslator"]
        )
        self._tostring_method: "_TostringMethodType" = typing.cast(
            "_TostringMethodType", _ctgroup[st]["_tostring_method"]
        )
        if text is not None:
            if not isinstance(text, str):
                msg = f"text argument should be of type str, got {text.__class__}"
                raise TypeError(msg)
            root = self._get_root(text, base_url, huge_tree)
        elif root is None:
            raise ValueError("Selector needs either text or root argument")
        self.namespaces = dict(self._default_namespaces)
        if namespaces is not None:
            self.namespaces.update(namespaces)
        self.root = root
        self._expr = _expr
    def __getstate__(self) -> Any:
        # Selectors wrap live lxml nodes, which cannot be pickled.
        raise TypeError("can't pickle Selector objects")
    def _get_root(
        self,
        text: str,
        base_url: Optional[str] = None,
        huge_tree: bool = LXML_SUPPORTS_HUGE_TREE,
    ) -> etree._Element:
        # Parse ``text`` into an lxml root element using this selector's parser.
        return create_root_node(
            text, self._parser, base_url=base_url, huge_tree=huge_tree
        )
    def xpath(
        self: _SelectorType,
        query: str,
        namespaces: Optional[Mapping[str, str]] = None,
        **kwargs,
    ) -> SelectorList[_SelectorType]:
        """
        Find nodes matching the xpath ``query`` and return the result as a
        :class:`SelectorList` instance with all elements flattened. List
        elements implement :class:`Selector` interface too.
        ``query`` is a string containing the XPATH query to apply.
        ``namespaces`` is an optional ``prefix: namespace-uri`` mapping (dict)
        for additional prefixes to those registered with ``register_namespace(prefix, uri)``.
        Contrary to ``register_namespace()``, these prefixes are not
        saved for future calls.
        Any additional named arguments can be used to pass values for XPath
        variables in the XPath expression, e.g.::
            selector.xpath('//a[href=$url]', url="http://www.example.com")
        """
        try:
            xpathev = self.root.xpath
        except AttributeError:
            # Roots without .xpath (e.g. string results) yield no matches.
            return typing.cast(
                SelectorList[_SelectorType], self.selectorlist_cls([])
            )
        nsp = dict(self.namespaces)
        if namespaces is not None:
            nsp.update(namespaces)
        try:
            result = xpathev(
                query,
                namespaces=nsp,
                smart_strings=self._lxml_smart_strings,
                **kwargs,
            )
        except etree.XPathError as exc:
            raise ValueError(f"XPath error: {exc} in {query}")
        # Scalar results (booleans, strings, numbers) are wrapped in a list.
        if type(result) is not list:
            result = [result]
        result = [
            self.__class__(
                root=x, _expr=query, namespaces=self.namespaces, type=self.type
            )
            for x in result
        ]
        return typing.cast(
            SelectorList[_SelectorType], self.selectorlist_cls(result)
        )
    def css(self: _SelectorType, query: str) -> SelectorList[_SelectorType]:
        """
        Apply the given CSS selector and return a :class:`SelectorList` instance.
        ``query`` is a string containing the CSS selector to apply.
        In the background, CSS queries are translated into XPath queries using
        `cssselect`_ library and run ``.xpath()`` method.
        .. _cssselect: https://pypi.python.org/pypi/cssselect/
        """
        return self.xpath(self._css2xpath(query))
    def _css2xpath(self, query: str) -> str:
        # Translate a CSS selector into the equivalent XPath expression.
        return self._csstranslator.css_to_xpath(query)
    def re(
        self, regex: Union[str, Pattern[str]], replace_entities: bool = True
    ) -> List[str]:
        """
        Apply the given regex and return a list of strings with the
        matches.
        ``regex`` can be either a compiled regular expression or a string which
        will be compiled to a regular expression using ``re.compile(regex)``.
        By default, character entity references are replaced by their
        corresponding character (except for ``&`` and ``<``).
        Passing ``replace_entities`` as ``False`` switches off these
        replacements.
        """
        return extract_regex(
            regex, self.get(), replace_entities=replace_entities
        )
    @typing.overload
    def re_first(
        self,
        regex: Union[str, Pattern[str]],
        default: None = None,
        replace_entities: bool = True,
    ) -> Optional[str]:
        pass
    @typing.overload
    def re_first(
        self,
        regex: Union[str, Pattern[str]],
        default: str,
        replace_entities: bool = True,
    ) -> str:
        pass
    def re_first(
        self,
        regex: Union[str, Pattern[str]],
        default: Optional[str] = None,
        replace_entities: bool = True,
    ) -> Optional[str]:
        """
        Apply the given regex and return the first string which matches. If
        there is no match, return the default value (``None`` if the argument
        is not provided).
        By default, character entity references are replaced by their
        corresponding character (except for ``&`` and ``<``).
        Passing ``replace_entities`` as ``False`` switches off these
        replacements.
        """
        return next(
            iflatten(self.re(regex, replace_entities=replace_entities)),
            default,
        )
    def get(self) -> str:
        """
        Serialize and return the matched nodes in a single string.
        Percent encoded content is unquoted.
        """
        try:
            return etree.tostring(
                self.root,
                method=self._tostring_method,
                encoding="unicode",
                with_tail=False,
            )
        except (AttributeError, TypeError):
            # Non-node roots (XPath booleans / scalars) are stringified.
            if self.root is True:
                return "1"
            elif self.root is False:
                return "0"
            else:
                return str(self.root)
    # Backwards-compatible alias.
    extract = get
    def getall(self) -> List[str]:
        """
        Serialize and return the matched node in a 1-element list of strings.
        """
        return [self.get()]
    def register_namespace(self, prefix: str, uri: str) -> None:
        """
        Register the given namespace to be used in this :class:`Selector`.
        Without registering namespaces you can't select or extract data from
        non-standard namespaces. See :ref:`selector-examples-xml`.
        """
        self.namespaces[prefix] = uri
    def remove_namespaces(self) -> None:
        """
        Remove all namespaces, allowing to traverse the document using
        namespace-less xpaths. See :ref:`removing-namespaces`.
        """
        for el in self.root.iter("*"):
            if el.tag.startswith("{"):
                el.tag = el.tag.split("}", 1)[1]
            # loop on element attributes also
            for an in el.attrib:
                if an.startswith("{"):
                    # this cast shouldn't be needed as pop never returns None
                    el.attrib[an.split("}", 1)[1]] = typing.cast(
                        str, el.attrib.pop(an)
                    )
        # remove namespace declarations
        etree.cleanup_namespaces(self.root)
    def remove(self) -> None:
        """
        Remove matched nodes from the parent element.
        """
        warn(
            "Method parsel.selector.Selector.remove is deprecated, please use parsel.selector.Selector.drop method instead",
            category=DeprecationWarning,
            stacklevel=2,
        )
        try:
            parent = self.root.getparent()
        except AttributeError:
            # 'str' object has no attribute 'getparent'
            raise CannotRemoveElementWithoutRoot(
                "The node you're trying to remove has no root, "
                "are you trying to remove a pseudo-element? "
                "Try to use 'li' as a selector instead of 'li::text' or "
                "'//li' instead of '//li/text()', for example."
            )
        try:
            parent.remove(self.root)  # type: ignore[union-attr]
        except AttributeError:
            # 'NoneType' object has no attribute 'remove'
            raise CannotRemoveElementWithoutParent(
                "The node you're trying to remove has no parent, "
                "are you trying to remove a root element?"
            )
    def drop(self) -> None:
        """
        Drop matched nodes from the parent element.
        """
        try:
            parent = self.root.getparent()
        except AttributeError:
            # 'str' object has no attribute 'getparent'
            raise CannotRemoveElementWithoutRoot(
                "The node you're trying to drop has no root, "
                "are you trying to drop a pseudo-element? "
                "Try to use 'li' as a selector instead of 'li::text' or "
                "'//li' instead of '//li/text()', for example."
            )
        try:
            if self.type == "xml":
                parent.remove(self.root)
            else:
                # drop_tree (lxml.html) also removes the element's tail text.
                self.root.drop_tree()
        except (AttributeError, AssertionError):
            # 'NoneType' object has no attribute 'drop'
            raise CannotDropElementWithoutParent(
                "The node you're trying to remove has no parent, "
                "are you trying to remove a root element?"
            )
    @property
    def attrib(self) -> Dict[str, str]:
        """Return the attributes dictionary for underlying element."""
        return dict(self.root.attrib)
    def __bool__(self) -> bool:
        """
        Return ``True`` if there is any real content selected or ``False``
        otherwise. In other words, the boolean value of a :class:`Selector` is
        given by the contents it selects.
        """
        return bool(self.get())
    __nonzero__ = __bool__
    def __str__(self) -> str:
        data = repr(shorten(self.get(), width=40))
        return f"<{type(self).__name__} xpath={self._expr!r} data={data}>"
    __repr__ = __str__
| bsd-3-clause | 450aad471083f3f9a9a3c784f608d44b | 31.634259 | 132 | 0.578333 | 4.289452 | false | false | false | false |
scrapy/parsel | tests/typing/selector.py | 1 | 1758 | # Basic usage of the Selector, strongly typed to test the typing of parsel's API.
import re
from parsel import Selector
def correct() -> None:
    # Happy path: every line below must type-check cleanly under mypy.
    selector = Selector(
        text="<html><body><ul><li>1</li><li>2</li><li>3</li></ul></body></html>"
    )
    li_values: list[str] = selector.css("li").getall()
    selector.re_first(re.compile(r"[32]"), "").strip()
    xpath_values: list[str] = selector.xpath(
        "//somens:a/text()", namespaces={"somens": "http://scrapy.org"}
    ).extract()
    class MySelector(Selector):
        def my_own_func(self) -> int:
            return 3
    my_selector = MySelector()
    res: int = my_selector.my_own_func()
    # The subclass type must be preserved when indexing into a SelectorList.
    sub_res: int = my_selector.xpath("//somens:a/text()")[0].my_own_func()
# Negative checks: all the code lines below have typing errors.
# the "# type: ignore" comment makes sure that mypy identifies them as errors.
def incorrect() -> None:
    # Unhappy path: each flagged line must be rejected by mypy; the
    # "# type: ignore" comments double as assertions that an error occurs.
    selector = Selector(
        text="<html><body><ul><li>1</li><li>2</li><li>3</li></ul></body></html>"
    )
    # Wrong query type in css.
    selector.css(5).getall()  # type: ignore
    # Cannot assign a list of str to an int.
    li_values: int = selector.css("li").getall()  # type: ignore
    # Cannot use a string to define namespaces in xpath.
    selector.xpath(
        "//somens:a/text()", namespaces='{"somens": "http://scrapy.org"}'  # type: ignore
    ).extract()
    # Typo in the extract method name.
    selector.css("li").extact()  # type: ignore
    class MySelector(Selector):
        def my_own_func(self) -> int:
            return 3
    my_selector = MySelector()
    res: str = my_selector.my_own_func()  # type: ignore
    sub_res: str = my_selector.xpath("//somens:a/text()")[0].my_own_func()  # type: ignore
| bsd-3-clause | 5df9fba692eeea6cccb2bd34fae9f0b4 | 30.963636 | 90 | 0.607509 | 3.316981 | false | false | false | false |
quantmind/pulsar | pulsar/utils/__init__.py | 1 | 1138 | '''
Documentation for utilities used by pulsar internals. This module is
independent from all other pulsar modules and therefore can be used
as a stand-alone library.
HTTP
============
.. automodule:: pulsar.utils.httpurl
.. _tools-ws-parser:
Websocket
==============================
.. automodule:: pulsar.utils.websocket
.. _api-config:
Configuration
==================
.. automodule:: pulsar.utils.config
.. _socket_address:
Socket and addresses
=======================
.. automodule:: pulsar.utils.internet
:members:
Internals
=======================
.. module:: pulsar.utils.system
System info
~~~~~~~~~~~~~~~~~
.. autofunction:: process_info
.. module:: pulsar.utils.tools
Check arity
~~~~~~~~~~~~~~~~~
.. autofunction:: checkarity
Structures
==================
.. automodule:: pulsar.utils.structures
HTML & Text
==================
.. automodule:: pulsar.utils.html
:members:
Slugify
==================
.. automodule:: pulsar.utils.slugify
Logging
==================
.. automodule:: pulsar.utils.log
:members:
Path
==================
.. automodule:: pulsar.utils.path
:members:
'''
| bsd-3-clause | b3343da38d2718abe498b6a856658351 | 12.232558 | 68 | 0.565905 | 3.635783 | false | false | false | false |
quantmind/pulsar | pulsar/apps/data/redis/__init__.py | 1 | 1272 | '''
Pulsar is shipped with a :class:`.Store` implementation for redis_
and :ref:`pulsard-ds <pulsar-data-store>` servers.
.. _redis: http://redis.io/
Redis Store
~~~~~~~~~~~~
.. autoclass:: pulsar.apps.data.redis.store.RedisStore
:members:
:member-order: bysource
Redis Client
~~~~~~~~~~~~~~~
.. autoclass:: pulsar.apps.data.redis.client.RedisClient
:members:
:member-order: bysource
Redis Pipeline
~~~~~~~~~~~~~~~
.. autoclass:: pulsar.apps.data.redis.client.Pipeline
:members:
:member-order: bysource
'''
from ....utils.config import Global
from ..store import register_store
from ...ds import RedisError, NoScriptError, redis_parser
from .store import RedisStore, RedisStoreConnection
from .client import ResponseError, Consumer, Pipeline
from .lock import RedisScript, LockError
__all__ = ['RedisStore', 'RedisError', 'NoScriptError', 'redis_parser',
'RedisStoreConnection', 'Consumer', 'Pipeline', 'ResponseError',
'RedisScript', 'LockError']
class RedisServer(Global):
    """Global config setting holding the default redis connection string."""
    name = 'redis_server'
    flags = ['--redis-server']
    meta = "CONNECTION_STRING"
    default = '127.0.0.1:6379/7'
    desc = 'Default connection string for the redis server'
| bsd-3-clause | a33c47e0a495fa36c1c610a468bd73dd | 23.941176 | 75 | 0.683962 | 3.46594 | false | false | false | false |
quantmind/pulsar | pulsar/async/access.py | 1 | 5246 | import os
import threading
import logging
import asyncio
from concurrent.futures import ThreadPoolExecutor
from collections import OrderedDict
from threading import current_thread
from inspect import isawaitable
from asyncio import Future, ensure_future
from ..utils.config import Config, Global
from ..utils.system import current_process, platform
__all__ = ['get_event_loop',
'new_event_loop',
'get_actor',
'cfg',
'cfg_value',
'isfuture',
'create_future',
'is_mainthread',
'process_data',
'thread_data',
'logger',
'NOTHING',
'EVENT_LOOPS',
'Future',
'isawaitable',
'ensure_future',
'CANCELLED_ERRORS']
# Event-loop base classes considered native, and the exception types
# raised when a task is cancelled.
_EVENT_LOOP_CLASSES = (asyncio.AbstractEventLoop,)
CANCELLED_ERRORS = (asyncio.CancelledError,)
def isfuture(x):
    """Return ``True`` when ``x`` is an :class:`asyncio.Future` instance."""
    return isinstance(x, Future)
def create_future(loop=None):
    """Return a new future bound to ``loop`` (default: current event loop).

    Falls back to ``asyncio.Future(loop=...)`` for loop implementations
    without a ``create_future`` method.
    """
    loop = loop or get_event_loop()
    factory = getattr(loop, 'create_future', None)
    if factory is not None:
        return factory()
    return asyncio.Future(loop=loop)
LOGGER = logging.getLogger('pulsar')
# Unique sentinel distinguishing "no value supplied" from ``None``.
NOTHING = object()
# Registry of available event-loop factories, in order of preference.
EVENT_LOOPS = OrderedDict()
# Loop class used by the default asyncio policy on this platform.
DefaultLoopClass = asyncio.get_event_loop_policy()._loop_factory
def make_loop_factory(selector):
    """Return a zero-argument factory building the default loop class
    around a fresh instance of ``selector``."""
    def loop_factory():
        return DefaultLoopClass(selector())
    return loop_factory
if platform.type == 'win':  # pragma nocover
    EVENT_LOOPS['select'] = asyncio.SelectorEventLoop
    EVENT_LOOPS['proactor'] = asyncio.ProactorEventLoop
else:
    # Register every selector implementation available on this platform,
    # iterating from the most to the least efficient mechanism.
    for selector in ('Epoll', 'Kqueue', 'Poll', 'Select'):
        name = '%sSelector' % selector
        selector_class = getattr(asyncio.selectors, name, None)
        if selector_class:
            EVENT_LOOPS[selector.lower()] = make_loop_factory(selector_class)
try:  # add uvloop if available
    import uvloop
    EVENT_LOOPS['uv'] = uvloop.Loop
    # Future = uvloop.Future
except Exception:  # pragma nocover
    pass
if os.environ.get('BUILDING-PULSAR-DOCS') == 'yes':  # pragma nocover
    # When building docs, describe the selection logic instead of
    # hard-coding whatever loop happens to exist on the build machine.
    default_loop = (
        'uvloop if available, epoll on linux, '
        'kqueue on mac, select on windows'
    )
elif EVENT_LOOPS:
    default_loop = tuple(EVENT_LOOPS)[0]
else:
    default_loop = None
if default_loop:
    # Expose the choice as a --io command line / config setting.
    class EventLoopSetting(Global):
        name = "event_loop"
        flags = ["--io"]
        choices = tuple(EVENT_LOOPS)
        default = default_loop
        desc = """\
    Specify the event loop used for I/O event polling.
    The default value is the best possible for the system running the
    application.
    """
# Re-exported asyncio helpers.
get_event_loop = asyncio.get_event_loop
new_event_loop = asyncio.new_event_loop
def is_mainthread(thread=None):
    '''Check if thread is the main thread.

    If ``thread`` is not supplied check the current thread
    '''
    thread = thread if thread is not None else current_thread()
    # Use the public threading.main_thread() API (Python 3.4+) instead of
    # the private threading._MainThread class, which is an implementation
    # detail and not guaranteed to exist.
    return thread is threading.main_thread()
def logger(loop=None, logger=None):
    """Return the ``logger`` attribute of ``loop`` (or of the current event
    loop), falling back to the module-level pulsar logger."""
    target = loop or get_event_loop()
    return getattr(target, 'logger', LOGGER)
def process_data(name=None):
    '''Fetch the current process local data dictionary.

    If ``name`` is not ``None`` return the value stored at ``name``,
    otherwise return the whole process data dictionary.
    '''
    process = current_process()
    try:
        local = process._pulsar_local
    except AttributeError:
        local = process._pulsar_local = {}
    if name:
        return local.get(name)
    return local
def thread_data(name, value=NOTHING, ct=None):
    '''Set or retrieve an attribute ``name`` from thread ``ct``.

    If ``ct`` is not given use the current thread. If ``value``
    is not supplied, get the value, otherwise set it.
    '''
    ct = ct or current_thread()
    # The main thread shares its storage with the process-wide dictionary.
    if is_mainthread(ct):
        loc = process_data()
    elif not hasattr(ct, '_pulsar_local'):
        ct._pulsar_local = loc = {}
    else:
        loc = ct._pulsar_local
    if value is not NOTHING:
        if name in loc:
            # Re-binding a name to a different object is a programming error.
            if loc[name] is not value:
                raise RuntimeError(
                    '%s is already available on this thread' % name)
        else:
            loc[name] = value
    return loc.get(name)
def get_actor():
    """Return the actor bound to the current thread, or ``None``."""
    return thread_data('actor')
def set_actor(actor):
    """Bind ``actor`` to the current thread and return it."""
    return thread_data('actor', actor)
def cfg():
    """Return the configuration of the current actor, or a fresh
    :class:`Config` when no actor is bound to this thread."""
    actor = get_actor()
    return actor.cfg if actor else Config()
def cfg_value(setting, value=None):
    """Return ``value`` when given, otherwise look ``setting`` up in the
    current configuration."""
    if value is not None:
        return value
    return cfg().get(setting)
class EventLoopPolicy(asyncio.DefaultEventLoopPolicy):
    """Event-loop policy creating pulsar-configured loops.

    ``name`` selects the loop factory from ``EVENT_LOOPS``; ``workers``
    sizes the default thread-pool executor; ``debug`` enables asyncio
    debug mode on every new loop.
    """
    def __init__(self, name, workers, debug):
        super().__init__()
        self.name = name
        self.workers = workers
        self.debug = debug
    @property
    def _local(self):
        # Policy-local storage is kept on the process object rather than the
        # policy instance — NOTE(review): presumably so a forked child does
        # not inherit the parent's loop state; confirm against worker code.
        lp = getattr(current_process(), '_event_loop_policy', None)
        if lp is None:
            self._local = lp = self._Local()
        return lp
    @_local.setter
    def _local(self, v):
        current_process()._event_loop_policy = v
    def _loop_factory(self):
        # Build the configured loop type with a sized default executor.
        loop = EVENT_LOOPS[self.name]()
        loop.set_default_executor(ThreadPoolExecutor(self.workers))
        if self.debug:
            loop.set_debug(True)
        return loop
| bsd-3-clause | 915b72c2155923441aed899d46037b59 | 24.466019 | 77 | 0.617042 | 3.834795 | false | false | false | false |
quantmind/pulsar | pulsar/apps/http/client.py | 1 | 34495 | import os
import platform
import logging
import asyncio
from functools import partial
from collections import namedtuple
from io import StringIO, BytesIO
from urllib.parse import urlparse, parse_qsl, urlencode, urlunparse
from http.client import responses
# ssl is optional: on builds without it, fall back to a dummy error class
# so that ``except BaseSSLError`` clauses remain valid.
try:
    import ssl
    BaseSSLError = ssl.SSLError
except ImportError:  # pragma nocover
    ssl = None
    class BaseSSLError(Exception):
        pass
# certifi (optional dependency) provides an up-to-date CA bundle path.
try:
    from certifi import where
    DEFAULT_CA_BUNDLE_PATH = where()
except ImportError:  # pragma nocover
    DEFAULT_CA_BUNDLE_PATH = None
from multidict import CIMultiDict
import pulsar
from pulsar.api import (
AbortEvent, AbstractClient, Pool, Connection,
ProtocolConsumer, HttpRequestException, HttpConnectionError,
SSLError, cfg_value
)
from pulsar.utils import websocket
from pulsar.utils.system import json as _json
from pulsar.utils.string import to_bytes
from pulsar.utils import http
from pulsar.utils.structures import mapping_iterator
from pulsar.async.timeout import timeout as async_timeout
from pulsar.utils.httpurl import (
encode_multipart_formdata, CHARSET, get_environ_proxies, is_succesful,
get_hostport, cookiejar_from_dict, http_chunks, JSON_CONTENT_TYPES,
parse_options_header, tls_schemes, parse_header_links, requote_uri,
)
from .plugins import (
handle_cookies, WebSocket, Redirect, start_request, RequestKey,
keep_alive, InfoHeaders, Expect
)
from .auth import Auth, HTTPBasicAuth
from .stream import HttpStream
from .decompress import GzipDecompress, DeflateDecompress
scheme_host = namedtuple('scheme_host', 'scheme netloc')
LOGGER = logging.getLogger('pulsar.http')
# Standard content types used when encoding request bodies.
FORM_URL_ENCODED = 'application/x-www-form-urlencoded'
MULTIPART_FORM_DATA = 'multipart/form-data'
def guess_filename(obj):
    """Tries to guess the filename of the given object.

    Returns the basename of ``obj.name`` when present and not a
    placeholder such as ``<stdin>``; otherwise ``None``.
    """
    name = getattr(obj, 'name', None)
    if not name or name[0] == '<' or name[-1] == '>':
        return None
    return os.path.basename(name)
def scheme_host_port(url):
    """Return the ``(scheme, host, port)`` triple for ``url``
    (host and port resolved via :func:`get_hostport`)."""
    url = urlparse(url)
    host, port = get_hostport(url.scheme, url.netloc)
    return url.scheme, host, port
def is_streamed(data):
    """``True`` when ``data`` has no measurable length (e.g. a generator)."""
    try:
        len(data)
        return False
    except TypeError:
        return True
def split_url_params(params):
    """Yield flat ``(key, value)`` pairs from ``params``, expanding
    list/tuple values into one pair per item."""
    for key, values in mapping_iterator(params):
        if not isinstance(values, (list, tuple)):
            values = (values,)
        for value in values:
            yield key, value
def full_url(url, params, method=None):
    """Rebuild ``url`` merging ``params`` into its query string.

    A CONNECT target without a netloc is re-parsed with an ``http://``
    prefix so host and port are extracted correctly.
    """
    p = urlparse(url)
    if not p.netloc and method == 'CONNECT':
        p = urlparse('http://%s' % url)
    params = mapping_iterator(params)
    # Keep existing query parameters (including blank values) and append
    # the new ones before re-encoding and re-quoting the URI.
    query = parse_qsl(p.query, True)
    query.extend(split_url_params(params))
    query = urlencode(query)
    return requote_uri(
        urlunparse((p.scheme, p.netloc, p.path, p.params, query, p.fragment))
    )
class RequestBase:
    """Shared base for :class:`HttpRequest` and :class:`HttpTunnel`.

    Implements the subset of the :class:`urllib.request.Request` interface
    required by the standard-library cookie handlers.
    """
    inp_params = None
    release_connection = True
    history = None
    url = None
    @property
    def unverifiable(self):
        """Unverifiable when a redirect.

        It is a redirect when :attr:`history` has past requests.
        """
        return bool(self.history)
    @property
    def origin_req_host(self):
        """Required by Cookies handlers
        """
        if self.history:
            return self.history[0].request.origin_req_host
        else:
            return scheme_host_port(self.url)[1]
    @property
    def type(self):
        """URL scheme of the request key (required by cookie handlers)."""
        return self.key.scheme
    @property
    def full_url(self):
        """Alias of :attr:`url` (required by cookie handlers)."""
        return self.url
    def new_parser(self, protocol):
        """Attach fresh headers to ``protocol`` and return a new HTTP parser."""
        protocol.headers = CIMultiDict()
        return self.client.http_parser(protocol)
    def get_full_url(self):
        """Required by Cookies handlers
        """
        return self.url
    def write_body(self, transport):
        """Hook for subclasses that write a request body; no-op by default."""
        pass
class HttpTunnel(RequestBase):
    """A CONNECT request used to establish a tunnel through an HTTP proxy."""
    first_line = None
    data = None
    decompress = False
    method = 'CONNECT'
    def __init__(self, client, req):
        self.client = client
        self.key = req
        self.headers = CIMultiDict(client.DEFAULT_TUNNEL_HEADERS)
    def __repr__(self):
        return 'Tunnel %s' % self.url
    __str__ = __repr__
    def encode(self):
        """Serialize the CONNECT request line and headers to bytes."""
        self.headers['host'] = self.key.netloc
        self.first_line = 'CONNECT http://%s:%s HTTP/1.1' % self.key.address
        buffer = [self.first_line.encode('ascii'), b'\r\n']
        buffer.extend((('%s: %s\r\n' % (name, value)).encode(CHARSET)
                       for name, value in self.headers.items()))
        buffer.append(b'\r\n')
        return b''.join(buffer)
    def has_header(self, header_name):
        """``True`` if ``header_name`` is present (cookie-handler API)."""
        return header_name in self.headers
    def get_header(self, header_name, default=None):
        """Return the header value or ``default`` (cookie-handler API)."""
        return self.headers.get(header_name, default)
    def remove_header(self, header_name):
        """Remove ``header_name`` if present (cookie-handler API)."""
        self.headers.pop(header_name, None)
class HttpRequest(RequestBase):
    """An :class:`HttpClient` request for an HTTP resource.
    This class has a similar interface to :class:`urllib.request.Request`.
    :param files: optional dictionary of name, file-like-objects.
    :param allow_redirects: allow the response to follow redirects.
    .. attribute:: method
        The request method
    .. attribute:: version
        HTTP version for this request, usually ``HTTP/1.1``
    .. attribute:: history
        List of past :class:`.HttpResponse` (collected during redirects).
    .. attribute:: wait_continue
        if ``True``, the :class:`HttpRequest` includes the
        ``Expect: 100-Continue`` header.
    .. attribute:: stream
        Allow for streaming body
    """
    _proxy = None        # proxy URL for plain (non-TLS) proxied requests
    _ssl = None          # TLS context, set once the tunnel (if any) is up
    _tunnel = None       # proxy URL when the request must be tunnelled
    _write_done = False  # guard so the body is written at most once
    def __init__(self, client, url, method, inp_params=None, headers=None,
                 data=None, files=None, json=None, history=None, auth=None,
                 charset=None, max_redirects=10, source_address=None,
                 allow_redirects=False, decompress=True, version=None,
                 wait_continue=False, websocket_handler=None, cookies=None,
                 params=None, stream=False, proxies=None, verify=True,
                 cert=None, **extra):
        self.client = client
        self.method = method.upper()
        self.inp_params = inp_params or {}
        self.unredirected_headers = CIMultiDict()
        self.history = history
        self.wait_continue = wait_continue
        self.max_redirects = max_redirects
        self.allow_redirects = allow_redirects
        self.charset = charset or 'utf-8'
        self.version = version
        self.decompress = decompress
        self.websocket_handler = websocket_handler
        self.source_address = source_address
        self.stream = stream
        self.verify = verify
        self.cert = cert
        # A plain (user, password) tuple is promoted to HTTP basic auth.
        if auth and not isinstance(auth, Auth):
            auth = HTTPBasicAuth(*auth)
        self.auth = auth
        # Merge *params* into the URL query string.
        self.url = full_url(url, params, method=self.method)
        self._set_proxy(proxies)
        self.key = RequestKey.create(self)
        self.headers = client.get_headers(self, headers)
        # Encoding the body may also set content-type/length headers.
        self.body = self._encode_body(data, files, json)
        self.unredirected_headers['host'] = self.key.netloc
        cookies = cookiejar_from_dict(client.cookies, cookies)
        if cookies:
            cookies.add_cookie_header(self)
    @property
    def _loop(self):
        # Event loop of the owning client.
        return self.client._loop
    @property
    def ssl(self):
        """Context for TLS connections.
        If this is a tunneled request and the tunnel connection is not yet
        established, it returns ``None``.
        """
        return self._ssl
    @property
    def proxy(self):
        """Proxy server for this request.
        """
        return self._proxy
    @property
    def tunnel(self):
        """Tunnel for this request.
        """
        return self._tunnel
    def __repr__(self):
        return self.first_line()
    __str__ = __repr__
    def first_line(self):
        """Return the HTTP request line (``METHOD target VERSION``)."""
        if self.method == 'CONNECT':
            # CONNECT targets the authority, not a path.
            url = self.key.netloc
        elif self._proxy:
            # Requests through a plain proxy use the absolute URI.
            url = self.url
        else:
            # Direct requests use the origin-form (path + query).
            p = urlparse(self.url)
            url = urlunparse(('', '', p.path or '/', p.params,
                              p.query, p.fragment))
        return '%s %s %s' % (self.method, url, self.version)
    def is_chunked(self):
        # Chunked transfer when a body exists but its length is unknown.
        return self.body and 'content-length' not in self.headers
    def encode(self):
        """The bytes representation of this :class:`HttpRequest`.
        Called by :class:`HttpResponse` when it needs to encode this
        :class:`HttpRequest` before sending it to the HTTP resource.
        """
        # Build the request line first, then merge the unredirected
        # headers beneath the regular ones (regular headers win).
        first_line = self.first_line()
        if self.body and self.wait_continue:
            self.headers['expect'] = '100-continue'
        headers = self.headers
        if self.unredirected_headers:
            headers = self.unredirected_headers.copy()
            headers.update(self.headers)
        buffer = [first_line.encode('ascii'), b'\r\n']
        buffer.extend((('%s: %s\r\n' % (name, value)).encode(CHARSET)
                       for name, value in headers.items()))
        buffer.append(b'\r\n')
        return b''.join(buffer)
    def add_header(self, key, value):
        self.headers[key] = value
    def has_header(self, header_name):
        """Check ``header_name`` is in this request headers.
        """
        return (header_name in self.headers or
                header_name in self.unredirected_headers)
    def get_header(self, header_name, default=None):
        """Retrieve ``header_name`` from this request headers.
        """
        return self.headers.get(
            header_name, self.unredirected_headers.get(header_name, default))
    def remove_header(self, header_name):
        """Remove ``header_name`` from this request.
        """
        val1 = self.headers.pop(header_name, None)
        val2 = self.unredirected_headers.pop(header_name, None)
        return val1 or val2
    def add_unredirected_header(self, header_name, header_value):
        # Headers which must not be copied when the request is redirected
        # (required by the http.cookiejar protocol).
        self.unredirected_headers[header_name] = header_value
    def write_body(self, transport):
        """Write the request body to *transport* (at most once)."""
        assert not self._write_done, 'Body already sent'
        self._write_done = True
        if not self.body:
            return
        if is_streamed(self.body):
            # Streamed bodies are written asynchronously, chunk by chunk.
            self._loop.create_task(self._write_streamed_data(transport))
        else:
            self._write_body_data(transport, self.body, True)
    # INTERNAL ENCODING METHODS
    def _encode_body(self, data, files, json):
        """Encode *data*/*files*/*json* into the request body bytes.

        Also sets ``content-type`` and ``content-length`` headers as a
        side effect.  Streamed *data* is returned untouched (and sent
        chunked when no content-length is known).
        """
        body = None
        ct = None
        if isinstance(data, (str, bytes)):
            if files:
                raise ValueError('data cannot be a string or bytes when '
                                 'files are present')
            body = to_bytes(data, self.charset)
        elif data and is_streamed(data):
            if files:
                raise ValueError('data cannot be an iterator when '
                                 'files are present')
            if 'content-length' not in self.headers:
                self.headers['transfer-encoding'] = 'chunked'
            return data
        elif data or files:
            if files:
                body, ct = self._encode_files(data, files)
            else:
                body, ct = self._encode_params(data)
        elif json:
            # NOTE(review): falsy json payloads ({}, 0, "") are skipped by
            # this branch -- confirm this is intended.
            body = _json.dumps(json).encode(self.charset)
            ct = 'application/json'
        if not self.headers.get('content-type') and ct:
            self.headers['Content-Type'] = ct
        if body:
            self.headers['content-length'] = str(len(body))
        return body
    def _encode_files(self, data, files):
        """Encode *data* fields and *files* as multipart/form-data."""
        fields = []
        # Normalise scalar values to single-element lists and force every
        # name/value onto str/bytes.
        for field, val in mapping_iterator(data or ()):
            if (isinstance(val, str) or isinstance(val, bytes) or
                    not hasattr(val, '__iter__')):
                val = [val]
            for v in val:
                if v is not None:
                    if not isinstance(v, bytes):
                        v = str(v)
                    fields.append((field.decode('utf-8') if
                                   isinstance(field, bytes) else field,
                                   v.encode('utf-8') if isinstance(v, str)
                                   else v))
        for (k, v) in mapping_iterator(files):
            # support for explicit filename
            ft = None
            if isinstance(v, (tuple, list)):
                if len(v) == 2:
                    fn, fp = v
                else:
                    fn, fp, ft = v
            else:
                fn = guess_filename(v) or k
                fp = v
            # Wrap raw payloads so everything exposes ``read``.
            if isinstance(fp, bytes):
                fp = BytesIO(fp)
            elif isinstance(fp, str):
                fp = StringIO(fp)
            if ft:
                new_v = (fn, fp.read(), ft)
            else:
                new_v = (fn, fp.read())
            fields.append((k, new_v))
        #
        return encode_multipart_formdata(fields, charset=self.charset)
    def _encode_params(self, params):
        """Encode *params* according to the request content type."""
        content_type = self.headers.get('content-type')
        # No content type given, chose one
        if not content_type:
            content_type = FORM_URL_ENCODED
        if hasattr(params, 'read'):
            # File-like input: read it fully first.
            params = params.read()
        if content_type in JSON_CONTENT_TYPES:
            body = _json.dumps(params)
        elif content_type == FORM_URL_ENCODED:
            body = urlencode(tuple(split_url_params(params)))
        elif content_type == MULTIPART_FORM_DATA:
            body, content_type = encode_multipart_formdata(
                params, charset=self.charset)
        else:
            body = params
        return to_bytes(body, self.charset), content_type
    def _write_body_data(self, transport, data, finish=False):
        # Write *data* to the transport, chunk-encoding it when the
        # request uses chunked transfer encoding.
        if self.is_chunked():
            data = http_chunks(data, finish)
        elif data:
            data = (data,)
        else:
            return
        for chunk in data:
            transport.write(chunk)
    async def _write_streamed_data(self, transport):
        # Drain an (async) iterable body; each item may be awaitable.
        for data in self.body:
            try:
                data = await data
            except TypeError:
                # Plain (non-awaitable) chunk.
                pass
            self._write_body_data(transport, data)
        # Terminate the chunked stream.
        self._write_body_data(transport, b'', True)
    # PROXY INTERNALS
    def _set_proxy(self, proxies):
        """Resolve proxy configuration for this request.

        TLS schemes are tunnelled (CONNECT); plain schemes use a direct
        proxy.  Hosts listed under the ``no`` key are never proxied.
        """
        url = urlparse(self.url)
        request_proxies = self.client.proxies.copy()
        if proxies:
            request_proxies.update(proxies)
        self.proxies = request_proxies
        #
        if url.scheme in request_proxies:
            host, port = get_hostport(url.scheme, url.netloc)
            no_proxy = [n for n in request_proxies.get('no', '').split(',')
                        if n]
            if not any(map(host.endswith, no_proxy)):
                proxy_url = request_proxies[url.scheme]
                if url.scheme in tls_schemes:
                    self._tunnel = proxy_url
                else:
                    self._proxy = proxy_url
class HttpResponse(ProtocolConsumer):
    """A :class:`.ProtocolConsumer` for the HTTP client protocol.
    Initialised by a call to the :class:`HttpClient.request` method.
    """
    _has_proxy = False
    _data_sent = None
    _cookies = None
    _raw = None
    content = None        # accumulated body bytes (when not streaming)
    headers = None        # response headers, filled by ``on_header``
    parser = None         # per-request HTTP parser
    version = None        # HTTP version of the response
    status_code = None
    request_again = None  # (method, url, params) when a retry/redirect is due
    ONE_TIME_EVENTS = ('pre_request', 'on_headers', 'post_request')
    def __repr__(self):
        return '<Response [%s]>' % (self.status_code or 'None')
    __str__ = __repr__
    @property
    def url(self):
        """The request full url.
        """
        request = self.request
        if request:
            return request.url
    @property
    def history(self):
        """List of :class:`.HttpResponse` objects from the history of the
        request. Any redirect responses will end up here.
        The list is sorted from the oldest to the most recent request."""
        request = self.request
        if request:
            return request.history
    @property
    def ok(self):
        # Successful status code, or still in flight (no status yet and
        # the request has not finished).
        if self.status_code:
            return is_succesful(self.status_code)
        else:
            return not self.event('post_request').fired()
    @property
    def cookies(self):
        """Dictionary of cookies set by the server or ``None``.
        """
        return self._cookies
    @property
    def encoding(self):
        # Charset advertised in the content-type header, if any.
        ct = self.headers.get('content-type')
        if ct:
            ct, options = parse_options_header(ct)
            return options.get('charset')
    @property
    def raw(self):
        """A raw asynchronous Http response
        """
        if self._raw is None:
            self._raw = HttpStream(self)
        return self._raw
    @property
    def links(self):
        """Returns the parsed header links of the response, if any
        """
        headers = self.headers or {}
        header = headers.get('link')
        li = {}
        if header:
            links = parse_header_links(header)
            for link in links:
                # Prefer the "rel" attribute as key, fall back to the url.
                key = link.get('rel') or link.get('url')
                li[key] = link
        return li
    @property
    def reason(self):
        # Standard reason phrase for the status code.
        return responses.get(self.status_code)
    @property
    def text(self):
        """Decode content as a string.
        """
        data = self.content
        return data.decode(self.encoding or 'utf-8') if data else ''
    def json(self):
        """Decode content as a JSON object.
        """
        return _json.loads(self.text)
    def decode_content(self):
        """Return the best possible representation of the response body.
        """
        ct = self.headers.get('content-type')
        if ct:
            ct, options = parse_options_header(ct)
            charset = options.get('charset')
            if ct in JSON_CONTENT_TYPES:
                return self.json()
            elif ct.startswith('text/'):
                return self.text
            elif ct == FORM_URL_ENCODED:
                return parse_qsl(self.content.decode(charset),
                                 keep_blank_values=True)
        return self.content
    def raise_for_status(self):
        """Raises stored :class:`HTTPError` or :class:`URLError`, if occurred.
        """
        if not self.ok:
            reason = self.reason or 'No response from %s' % self.url
            if not self.status_code:
                # No status at all: the connection itself failed.
                raise HttpConnectionError(reason, response=self)
            if 400 <= self.status_code < 500:
                http_error_msg = '%s Client Error - %s - %s %s' % (
                    self.status_code, reason, self.request.method, self.url)
            else:
                http_error_msg = '%s Server Error - %s - %s %s' % (
                    self.status_code, reason, self.request.method, self.url)
            raise HttpRequestException(http_error_msg, response=self)
    def info(self):
        """Required by python CookieJar.
        Return :attr:`headers`.
        """
        return InfoHeaders(self.headers)
    # #####################################################################
    # # PROTOCOL CONSUMER IMPLEMENTATION
    def start_request(self):
        # Serialise and send the request headers; the body follows unless
        # we are waiting for a "100 Continue" from the server.
        request = self.request
        self.parser = request.new_parser(self)
        headers = request.encode()
        self.connection.transport.write(headers)
        if not headers or request.headers.get('expect') != '100-continue':
            self.write_body()
    def feed_data(self, data):
        # Push raw bytes into the HTTP parser; a websocket/HTTP2 upgrade
        # is signalled via an exception which we deliberately swallow.
        try:
            self.parser.feed_data(data)
        except http.HttpParserUpgrade:
            pass
    def on_header(self, name, value):
        # Parser callback: collect one decoded header pair.
        self.headers.add(name.decode(CHARSET), value.decode(CHARSET))
    def on_headers_complete(self):
        request = self.request
        self.status_code = self.parser.get_status_code()
        self.version = self.parser.get_http_version()
        self.event('on_headers').fire()
        if request.method == 'HEAD':
            # HEAD responses carry no body: the exchange is complete.
            self.event('post_request').fire()
    def on_body(self, body):
        # Stream to the raw consumer when requested, otherwise buffer.
        if self.request.stream or self._raw:
            self.raw.feed_data(body)
        elif self.content is None:
            self.content = body
        else:
            self.content += body
    def on_message_complete(self):
        # Full response received: decompress (if needed) and finish.
        self.producer.maybe_decompress(self)
        self.fire_event('post_request')
    def write_body(self):
        self.request.write_body(self.connection)
class HttpClient(AbstractClient):
    """A client for HTTP/HTTPS servers.
    It handles pool of asynchronous connections.
    :param pool_size: set the :attr:`pool_size` attribute.
    :param store_cookies: set the :attr:`store_cookies` attribute
    .. attribute:: headers
        Default headers for this :class:`HttpClient`.
        Default: :attr:`DEFAULT_HTTP_HEADERS`.
    .. attribute:: cookies
        Default cookies for this :class:`HttpClient`.
    .. attribute:: store_cookies
        If ``True`` it remembers response cookies and sends them back to
        servers.
        Default: ``True``
    .. attribute:: timeout
        Default timeout for requests. If None or 0, no timeout on requests
    .. attribute:: proxies
        Dictionary of proxy servers for this client.
    .. attribute:: pool_size
        The size of a pool of connection for a given host.
    .. attribute:: connection_pools
        Dictionary of connection pools for different hosts
    .. attribute:: DEFAULT_HTTP_HEADERS
        Default headers for this :class:`HttpClient`
    """
    max_redirects = 10
    """Maximum number of redirects.
    It can be overwritten on :meth:`request`.
    """
    connection_pool = Pool
    """Connection :class:`.Pool` factory
    """
    client_version = pulsar.SERVER_SOFTWARE
    """String for the ``User-Agent`` header.
    """
    version = 'HTTP/1.1'
    """Default HTTP request version for this :class:`HttpClient`.
    It can be overwritten on :meth:`request`.
    """
    DEFAULT_HTTP_HEADERS = (
        ('Connection', 'Keep-Alive'),
        ('Accept', '*/*'),
        ('Accept-Encoding', 'deflate'),
        ('Accept-Encoding', 'gzip')
    )
    DEFAULT_TUNNEL_HEADERS = (
        ('Connection', 'Keep-Alive'),
        ('Proxy-Connection', 'Keep-Alive')
    )
    # Client attributes copied into each request unless the caller
    # overrides them explicitly (see ``_request``).
    request_parameters = (
        'max_redirects',
        'decompress',
        'websocket_handler',
        'version',
        'verify',
        'stream',
        'cert'
    )
    # Default hosts not affected by proxy settings. This can be overwritten
    # by specifying the "no" key in the proxies dictionary
    no_proxy = set(('localhost', platform.node()))
    def __init__(self, proxies=None, headers=None, verify=True,
                 cookies=None, store_cookies=True, cert=None,
                 max_redirects=10, decompress=True, version=None,
                 websocket_handler=None, parser=None, trust_env=True,
                 loop=None, client_version=None, timeout=None, stream=False,
                 pool_size=10, frame_parser=None, logger=None,
                 close_connections=False, keep_alive=None):
        super().__init__(
            partial(Connection, HttpResponse),
            loop=loop,
            keep_alive=keep_alive or cfg_value('http_keep_alive')
        )
        self.logger = logger or LOGGER
        self.client_version = client_version or self.client_version
        self.connection_pools = {}
        self.pool_size = pool_size
        self.trust_env = trust_env
        self.timeout = timeout
        self.store_cookies = store_cookies
        self.max_redirects = max_redirects
        self.cookies = cookiejar_from_dict(cookies)
        self.decompress = decompress
        self.version = version or self.version
        # SSL Verification default
        self.verify = verify
        # SSL client certificate default, if String, path to ssl client
        # cert file (.pem). If Tuple, ('cert', 'key') pair
        self.cert = cert
        self.stream = stream
        self.close_connections = close_connections
        dheaders = CIMultiDict(self.DEFAULT_HTTP_HEADERS)
        dheaders['user-agent'] = self.client_version
        # override headers
        if headers:
            for name, value in mapping_iterator(headers):
                if value is None:
                    # ``None`` removes a default header.
                    dheaders.pop(name, None)
                else:
                    dheaders[name] = value
        self.headers = dheaders
        self.proxies = dict(proxies or ())
        if not self.proxies and self.trust_env:
            # Pick up http_proxy/https_proxy/no_proxy from the environment.
            self.proxies = get_environ_proxies()
        if 'no' not in self.proxies:
            self.proxies['no'] = ','.join(self.no_proxy)
        self.websocket_handler = websocket_handler
        self.http_parser = parser or http.HttpResponseParser
        self.frame_parser = frame_parser or websocket.frame_parser
        # Add hooks
        self.event('on_headers').bind(handle_cookies)
        self.event('pre_request').bind(WebSocket())
        self.event('post_request').bind(Expect())
        self.event('post_request').bind(Redirect())
        self._decompressors = dict(
            gzip=GzipDecompress(),
            deflate=DeflateDecompress()
        )
    # API
    def connect(self, address):
        # Issue a CONNECT request; tuple addresses become "host:port".
        if isinstance(address, tuple):
            address = ':'.join(('%s' % v for v in address))
        return self.request('CONNECT', address)
    def get(self, url, **kwargs):
        """Sends a GET request and returns a :class:`.HttpResponse` object.
        :params url: url for the new :class:`HttpRequest` object.
        :param \*\*kwargs: Optional arguments for the :meth:`request` method.
        """
        return self.request('GET', url, **kwargs)
    def options(self, url, **kwargs):
        """Sends a OPTIONS request and returns a :class:`.HttpResponse` object.
        :params url: url for the new :class:`HttpRequest` object.
        :param \*\*kwargs: Optional arguments for the :meth:`request` method.
        """
        return self.request('OPTIONS', url, **kwargs)
    def head(self, url, **kwargs):
        """Sends a HEAD request and returns a :class:`.HttpResponse` object.
        :params url: url for the new :class:`HttpRequest` object.
        :param \*\*kwargs: Optional arguments for the :meth:`request` method.
        """
        return self.request('HEAD', url, **kwargs)
    def post(self, url, **kwargs):
        """Sends a POST request and returns a :class:`.HttpResponse` object.
        :params url: url for the new :class:`HttpRequest` object.
        :param \*\*kwargs: Optional arguments for the :meth:`request` method.
        """
        return self.request('POST', url, **kwargs)
    def put(self, url, **kwargs):
        """Sends a PUT request and returns a :class:`.HttpResponse` object.
        :params url: url for the new :class:`HttpRequest` object.
        :param \*\*kwargs: Optional arguments for the :meth:`request` method.
        """
        return self.request('PUT', url, **kwargs)
    def patch(self, url, **kwargs):
        """Sends a PATCH request and returns a :class:`.HttpResponse` object.
        :params url: url for the new :class:`HttpRequest` object.
        :param \*\*kwargs: Optional arguments for the :meth:`request` method.
        """
        return self.request('PATCH', url, **kwargs)
    def delete(self, url, **kwargs):
        """Sends a DELETE request and returns a :class:`.HttpResponse` object.
        :params url: url for the new :class:`HttpRequest` object.
        :param \*\*kwargs: Optional arguments for the :meth:`request` method.
        """
        return self.request('DELETE', url, **kwargs)
    def request(self, method, url, **params):
        """Constructs and sends a request to a remote server.
        It returns a :class:`.Future` which results in a
        :class:`.HttpResponse` object.
        :param method: request method for the :class:`HttpRequest`.
        :param url: URL for the :class:`HttpRequest`.
        :param params: optional parameters for the :class:`HttpRequest`
            initialisation.
        :rtype: a coroutine
        """
        response = self._request(method, url, **params)
        if not self._loop.is_running():
            # Synchronous usage: drive the loop to completion.
            return self._loop.run_until_complete(response)
        else:
            return response
    def close(self):
        """Close all connections
        """
        waiters = []
        for p in self.connection_pools.values():
            waiters.append(p.close())
        self.connection_pools.clear()
        # NOTE(review): the ``loop`` argument to ``asyncio.gather`` was
        # deprecated in Python 3.8 and removed in 3.10 -- confirm the
        # supported interpreter range.
        return asyncio.gather(*waiters, loop=self._loop)
    def maybe_decompress(self, response):
        """Inflate ``response.content`` in place when the server compressed
        it and the request asked for decompression."""
        encoding = response.headers.get('content-encoding')
        if encoding and response.request.decompress:
            deco = self._decompressors.get(encoding)
            if not deco:
                # BUG(review): an unknown encoding logs a warning but then
                # falls through -- the call below raises TypeError because
                # ``deco`` is None.  It should return after the warning.
                self.logger.warning('Cannot decompress %s', encoding)
            response.content = deco(response.content)
    async def __aenter__(self):
        # NOTE(review): closing all pools on *enter* is surprising;
        # presumably it guarantees a fresh connection state -- confirm
        # this is intended rather than a plain ``return self``.
        await self.close()
        return self
    async def __aexit__(self, exc_type, exc_val, exc_tb):
        await self.close()
    # INTERNALS
    async def _request(self, method, url, timeout=None, **params):
        # Core request coroutine: resolves parameters, picks (or creates)
        # the connection pool for the request key, performs the exchange
        # and transparently follows redirects/retries.
        if timeout is None:
            timeout = self.timeout
        if method != 'HEAD':
            params.setdefault('allow_redirects', True)
        with async_timeout(self._loop, timeout):
            # Fill in client-level defaults for parameters not supplied.
            nparams = params.copy()
            nparams.update(((name, getattr(self, name)) for name in
                            self.request_parameters if name not in params))
            request = HttpRequest(self, url, method, params, **nparams)
            key = request.key
            pool = self.connection_pools.get(key)
            if pool is None:
                tunnel = request.tunnel
                if tunnel:
                    connector = partial(self.create_tunnel_connection, key)
                else:
                    connector = partial(self.create_http_connection, key)
                pool = self.connection_pool(
                    connector, pool_size=self.pool_size, loop=self._loop
                )
                self.connection_pools[request.key] = pool
            try:
                conn = await pool.connect()
            except BaseSSLError as e:
                raise SSLError(str(e), response=self) from None
            except ConnectionRefusedError as e:
                raise HttpConnectionError(str(e), response=self) from None
            async with conn:
                try:
                    response = await start_request(request, conn)
                    status_code = response.status_code
                except AbortEvent:
                    # A pre_request hook aborted the exchange.
                    response = None
                    status_code = None
                # Detach the connection from the pool when it cannot be
                # reused (error, upgrade, streaming or keep-alive off).
                if (not status_code or
                        not keep_alive(response.version, response.headers) or
                        status_code == 101 or
                        # if response is done stream is not relevant
                        (response.request.stream and not
                         response.event('post_request').fired()) or
                        self.close_connections):
                    await conn.detach()
            # Handle a possible redirect
            if response and isinstance(response.request_again, tuple):
                method, url, params = response.request_again
                response = await self._request(method, url, **params)
            return response
    def get_headers(self, request, headers):
        # Returns a :class:`Header` obtained from combining
        # :attr:`headers` with *headers*. Can handle websocket requests.
        # TODO: work around a bug in CIMultiDict.copy()
        # d = self.headers.copy()
        d = CIMultiDict(self.headers.items())
        if headers:
            d.update(headers)
        return d
    def ssl_context(self, verify=True, cert_reqs=None,
                    check_hostname=False, certfile=None, keyfile=None,
                    cafile=None, capath=None, cadata=None, **kw):
        """Create a SSL context object.
        This method should not be called by from user code
        """
        assert ssl, 'SSL not supported'
        cafile = cafile or DEFAULT_CA_BUNDLE_PATH
        if verify is True:
            cert_reqs = ssl.CERT_REQUIRED
            check_hostname = True
        if isinstance(verify, str):
            # A string means a custom CA bundle file or directory.
            cert_reqs = ssl.CERT_REQUIRED
            if os.path.isfile(verify):
                cafile = verify
            elif os.path.isdir(verify):
                capath = verify
        # NOTE(review): ``ssl._create_unverified_context`` is a private
        # CPython API -- its signature may change between versions.
        return ssl._create_unverified_context(cert_reqs=cert_reqs,
                                              check_hostname=check_hostname,
                                              certfile=certfile,
                                              keyfile=keyfile,
                                              cafile=cafile,
                                              capath=capath,
                                              cadata=cadata)
    async def create_http_connection(self, req):
        # Direct connection to the request address.
        return await self.create_connection(req.address, ssl=req.ssl(self))
    async def create_tunnel_connection(self, req):
        """Create a tunnel connection
        """
        # 1) connect to the proxy and issue a CONNECT request.
        tunnel_address = req.tunnel_address
        connection = await self.create_connection(tunnel_address)
        response = connection.current_consumer()
        for event in response.events().values():
            event.clear()
        response.start(HttpTunnel(self, req))
        await response.event('post_request').waiter()
        if response.status_code != 200:
            raise ConnectionRefusedError(
                'Cannot connect to tunnel: status code %s'
                % response.status_code
            )
        # 2) steal the raw socket and wrap it in a new TLS connection.
        raw_sock = connection.transport.get_extra_info('socket')
        if raw_sock is None:
            raise RuntimeError('Transport without socket')
        # duplicate socket so we can close transport
        raw_sock = raw_sock.dup()
        connection.transport.close()
        await connection.event('connection_lost').waiter()
        # Undo the bookkeeping of the throw-away proxy connection.
        self.sessions -= 1
        self.requests_processed -= 1
        #
        connection = await self.create_connection(
            sock=raw_sock, ssl=req.ssl(self), server_hostname=req.netloc
        )
        return connection
| bsd-3-clause | ba11a09c858e74ed1e95bfd6a6beb7c3 | 32.425388 | 79 | 0.574083 | 4.267599 | false | false | false | false |
quantmind/pulsar | pulsar/utils/tools/arity.py | 1 | 2604 | import inspect
__all__ = ['checkarity']
def checkarity(func, args, kwargs, discount=0):
    '''Check if arguments respect a given function arity and return
    an error message if the check did not pass,
    otherwise it returns ``None``.

    :parameter func: the function.
    :parameter args: function arguments.
    :parameter kwargs: function key-valued parameters.
    :parameter discount: optional integer which discount the number of
        positional argument to check. Default ``0``.
    '''
    # ``inspect.getargspec`` was removed in Python 3.11;
    # ``getfullargspec`` exposes the same information (the ``**kwargs``
    # name is ``varkw`` instead of ``keywords``).
    spec = inspect.getfullargspec(func)
    self = getattr(func, '__self__', None)
    if self and spec.args:
        # Bound method: ``self`` is supplied implicitly, skip one slot.
        discount += 1
    args = list(args)
    defaults = list(spec.defaults or ())
    len_defaults = len(defaults)
    len_args = len(spec.args) - discount
    len_args_input = len(args)
    minlen = len_args - len_defaults           # required positional count
    totlen = len_args_input + len(kwargs)      # everything the caller passed
    maxlen = len_args
    if spec.varargs or spec.varkw:
        # *args/**kwargs accept anything extra: no upper bound.
        maxlen = None
    if not minlen:
        # Every parameter has a default: nothing to check.
        return
    if not spec.defaults and maxlen:
        start = '"{0}" takes'.format(func.__name__)
    else:
        if maxlen and totlen > maxlen:
            start = '"{0}" takes at most'.format(func.__name__)
        else:
            start = '"{0}" takes at least'.format(func.__name__)
    if totlen < minlen:
        return '{0} {1} parameters. {2} given.'.format(start, minlen, totlen)
    elif maxlen and totlen > maxlen:
        return '{0} {1} parameters. {2} given.'.format(start, maxlen, totlen)
    # Length of parameter OK, check names
    if len_args_input < len_args:
        # ``le`` counts how many required parameters must still be
        # satisfied by keywords or defaults.
        le = minlen - len_args_input
        for arg in spec.args[discount:]:
            if args:
                args.pop(0)
            else:
                if le > 0:
                    if defaults:
                        defaults.pop(0)
                    elif arg not in kwargs:
                        return ('"{0}" has missing "{1}" parameter.'
                                .format(func.__name__, arg))
                    kwargs.pop(arg, None)
                    le -= 1
    if kwargs and maxlen:
        # Leftover keywords do not match any parameter name.
        s = ''
        if len(kwargs) > 1:
            s = 's'
        p = ', '.join('"{0}"'.format(p) for p in kwargs)
        return ('"{0}" does not accept {1} parameter{2}.'
                .format(func.__name__, p, s))
    elif len_args_input > len_args + len_defaults:
        n = len_args + len_defaults
        start = '"{0}" takes'.format(func.__name__)
        return ('{0} {1} positional parameters. {2} given.'
                .format(start, n, len_args_input))
| bsd-3-clause | 6cf1721812f1911ef0586b71179cc3bb | 34.671233 | 77 | 0.53341 | 4.018519 | false | false | false | false |
quantmind/pulsar | pulsar/utils/structures/misc.py | 1 | 5833 | from itertools import islice
import collections
import collections.abc
# ``collections.Mapping`` was deprecated since Python 3.3 and removed in
# Python 3.10; the ABC lives in ``collections.abc``.
Mapping = collections.abc.Mapping
# Builtin container types that ``aslist``/``as_tuple`` materialise.
COLLECTIONS = (list, tuple, set, frozenset)
def mapping_iterator(iterable):
    """Return an iterable of ``(key, value)`` pairs.

    Mappings yield their ``items()``; any other iterable is passed
    through unchanged, and falsy input yields nothing.
    """
    if isinstance(iterable, Mapping):
        return iterable.items()
    return iterable or ()
def inverse_mapping(iterable):
    """Return an iterator over ``(value, key)`` pairs of *iterable*."""
    pairs = iterable.items() if isinstance(iterable, Mapping) else iterable
    return ((value, key) for key, value in pairs)
def isgenerator(value):
    """``True`` for iterables without a length (generators, iterators)."""
    iterable = hasattr(value, '__iter__')
    sized = hasattr(value, '__len__')
    return iterable and not sized
def aslist(value):
    """Coerce *value* into a list.

    ``None`` becomes ``[]``, lists are returned unchanged, other builtin
    collections and generators are materialised, and any other object is
    wrapped in a single-element list.
    """
    if value is None:
        return []
    if isinstance(value, list):
        return value
    if isgenerator(value) or isinstance(value, COLLECTIONS):
        return list(value)
    return [value]
def as_tuple(value):
    """Like :func:`aslist` but produce a tuple instead of a list."""
    if value is None:
        return ()
    if isinstance(value, tuple):
        return value
    if isgenerator(value) or isinstance(value, COLLECTIONS):
        return tuple(value)
    return (value,)
class AttributeDictionary(collections.abc.Mapping):
    '''A :class:`Mapping` structure which exposes keys as attributes.

    Values can be read and written via both item access (``d['key']``)
    and attribute access (``d.key``); missing attributes evaluate to
    ``None`` instead of raising :class:`AttributeError`.

    The base class is :class:`collections.abc.Mapping`: the bare
    ``collections.Mapping`` alias was removed in Python 3.10.
    '''
    def __init__(self, *iterable, **kwargs):
        if iterable:
            if len(iterable) > 1:
                raise TypeError('%s expected at most 1 argument, got %s.' %
                                (self.__class__.__name__, len(iterable)))
            self.update(iterable[0])
        if kwargs:
            self.__dict__.update(kwargs)
    def __repr__(self):
        return repr(self.__dict__)
    def __str__(self):
        return str(self.__dict__)
    def __contains__(self, name):
        return name in self.__dict__
    def __len__(self):
        return len(self.__dict__)
    def __iter__(self):
        return iter(self.__dict__)
    def __getattr__(self, name):
        # Invoked only for missing attributes: return None, never raise.
        return self.__dict__.get(name)
    def __setattr__(self, name, value):
        self.__dict__[name] = value
    def __setitem__(self, name, value):
        self.__dict__[name] = value
    def __getitem__(self, name):
        return self.__dict__[name]
    def __getstate__(self):
        # Pickle a plain dict copy, independent of this class.
        return self.__dict__.copy()
    def __setstate__(self, state):
        self.__dict__.update(state)
    def update(self, *args, **kwargs):
        self.__dict__.update(*args, **kwargs)
    def all(self):
        '''The underlying dictionary.'''
        return self.__dict__
    def pop(self, name, default=None):
        return self.__dict__.pop(name, default)
    def values(self):
        return self.__dict__.values()
    def items(self):
        return self.__dict__.items()
    def copy(self):
        '''Return a shallow copy as a new :class:`AttributeDictionary`.'''
        return self.__class__(self)
    def clear(self):
        self.__dict__.clear()
class FrozenDict(dict):
    '''A dictionary which cannot be changed once initialised.

    All mutating operations (:meth:`__setitem__`, :meth:`update`,
    :meth:`pop`) raise :class:`TypeError`.  Ordering comparisons
    (``<``/``>``) compare by number of items and are ``False`` against
    objects without a length.
    '''
    def __init__(self, *iterable, **kwargs):
        # ``dict.update`` is the only mutation allowed, and only here.
        update = super().update
        if iterable:
            if len(iterable) > 1:
                raise TypeError('%s expected at most 1 argument, got %s.' %
                                (self.__class__.__name__, len(iterable)))
            update(iterable[0])
        if kwargs:
            update(kwargs)
    def __setitem__(self, key, value):
        raise TypeError("'%s' object does not support item assignment"
                        % self.__class__.__name__)
    def update(self, iterable):
        raise TypeError("'%s' object does not support update"
                        % self.__class__.__name__)
    def pop(self, key):
        raise TypeError("'%s' object does not support pop"
                        % self.__class__.__name__)
    def __gt__(self, other):
        if hasattr(other, '__len__'):
            return len(self) > len(other)
        else:
            return False
    def __lt__(self, other):
        if hasattr(other, '__len__'):
            return len(self) < len(other)
        else:
            return False
class Dict(dict):
    '''A ``dict`` with a couple of convenience methods.'''
    def mget(self, fields):
        '''Return the values for *fields*, ``None`` for missing keys.'''
        return [self.get(f) for f in fields]
    def flat(self):
        '''Return keys and values interleaved in a single flat list.'''
        # Plain loop instead of the previous list comprehension that was
        # abused for its ``extend`` side effects.
        result = []
        for pair in self.items():
            result.extend(pair)
        return result
class Deque(collections.deque):
    """A :class:`collections.deque` with list-like positional helpers
    (modelled on redis list commands)."""
    def insert_before(self, pivot, value):
        """Insert *value* immediately before the first occurrence of
        *pivot*; do nothing when *pivot* is not present."""
        items = list(self)
        try:
            index = items.index(pivot)
        except ValueError:
            pass
        else:
            items.insert(index, value)
            self.clear()
            self.extend(items)
    def insert_after(self, pivot, value):
        """Insert *value* immediately after the first occurrence of
        *pivot*; do nothing when *pivot* is not present."""
        items = list(self)
        try:
            index = items.index(pivot)
        except ValueError:
            pass
        else:
            items.insert(index + 1, value)
            self.clear()
            self.extend(items)
    def remove(self, elem, count=1):
        """Remove occurrences of *elem* and return how many were removed.

        ``count > 0`` removes from the left, ``count < 0`` removes
        ``abs(count)`` occurrences from the right and ``count == 0``
        removes every occurrence.
        """
        rev = False
        if count:
            if count < 0:
                # Work on a reversed copy so removal happens from the right.
                rev = True
                count = -count
                items = list(reversed(self))
            else:
                items = list(self)
            while count:
                try:
                    items.remove(elem)
                    count -= 1
                except ValueError:
                    break
        else:
            items = [v for v in self if v != elem]
        removed = len(self) - len(items)
        if removed:
            self.clear()
            self.extend(reversed(items) if rev else items)
        return removed
    def trim(self, start, end):
        """Keep only the elements in the ``[start:end)`` slice."""
        # Local renamed from ``slice`` so the builtin is not shadowed.
        kept = list(islice(self, start, end))
        self.clear()
        self.extend(kept)
def recursive_update(target, mapping):
    """Recursively merge *mapping* into *target* in place.

    ``None`` values in *mapping* are ignored; when both sides hold a
    mapping for the same key the two are merged key by key instead of
    being replaced wholesale.
    """
    for key, value in mapping.items():
        if value is None:
            continue
        if (key in target and isinstance(value, Mapping)
                and isinstance(target[key], Mapping)):
            recursive_update(target[key], value)
        else:
            target[key] = value
| bsd-3-clause | cbbc4376c835ce9909d6811453f0101d | 24.696035 | 76 | 0.520487 | 4.330364 | false | false | false | false |
quantmind/pulsar | pulsar/utils/lib.py | 1 | 1261 | import os
# Allow forcing the pure-Python implementation with PULSARPY=yes;
# otherwise attempt to use the C extensions.
if os.environ.get('PULSARPY', 'no') == 'yes':
    HAS_C_EXTENSIONS = False
else:
    HAS_C_EXTENSIONS = True
    try:
        import httptools  # noqa
        from .clib import (
            EventHandler, ProtocolConsumer, Protocol, Producer, WsgiProtocol,
            AbortEvent, RedisParser, WsgiResponse, wsgi_cached, http_date,
            FrameParser, has_empty_content, isawaitable, Event
        )
    except ImportError:
        # C extensions (or httptools) not available: fall back below.
        HAS_C_EXTENSIONS = False
# Pure-Python fallbacks; these re-bind the names imported from ``clib``
# above when the C extensions were disabled or unavailable.
if not HAS_C_EXTENSIONS:
    from inspect import isawaitable  # noqa
    from .pylib.protocols import ProtocolConsumer, Protocol, Producer  # noqa
    from .pylib.events import EventHandler, AbortEvent, Event  # noqa
    from .pylib.wsgi import WsgiProtocol, http_date, has_empty_content  # noqa
    from .pylib.wsgiresponse import WsgiResponse, wsgi_cached  # noqa
    from .pylib.redisparser import RedisParser  # noqa
    from .pylib.websocket import FrameParser  # noqa
# Public names re-exported by this module.
__all__ = [
    'HAS_C_EXTENSIONS',
    'AbortEvent',
    'EventHandler',
    'Event',
    'ProtocolConsumer',
    'Protocol',
    'WsgiProtocol',
    'WsgiResponse',
    'wsgi_cached',
    'http_date',
    'isawaitable',
    'has_empty_content',
    'RedisParser'
]
| bsd-3-clause | 2b53cbc9cca73ff46ad55e21c2f29427 | 28.325581 | 78 | 0.632831 | 3.844512 | false | false | true | false |
quantmind/pulsar | pulsar/utils/system/winprocess.py | 8 | 1245 | import sys
import os
from multiprocessing import forking, process, freeze_support
from multiprocessing.util import _logger, _log_to_stderr
# True when running from a frozen Windows executable (py2exe and friends).
WINEXE = forking.WINEXE
def get_preparation_data(name):
    '''
    Return info about parent needed by child to unpickle process object.

    Monkey-patched replacement for the function of the same name in
    ``multiprocessing.forking``.

    NOTE(review): ``multiprocessing.forking`` is a private module that was
    removed in Python 3.4 -- this file can only import on older versions;
    confirm the supported interpreter range.
    '''
    d = dict(
        name=name,
        sys_path=sys.path,
        sys_argv=sys.argv,
        log_to_stderr=_log_to_stderr,
        orig_dir=process.ORIGINAL_DIR,
        authkey=process.current_process().authkey,
    )
    if _logger is not None:
        d['log_level'] = _logger.getEffectiveLevel()
    if not WINEXE:
        # Record the parent's __main__ module so the child can re-import
        # it, resolving relative paths against the original working
        # directory of the parent process.
        main_path = getattr(sys.modules['__main__'], '__file__', None)
        if not main_path and sys.argv[0] not in ('', '-c'):
            main_path = sys.argv[0]
        if main_path is not None:
            if (not os.path.isabs(main_path) and process.ORIGINAL_DIR
                    is not None):
                main_path = os.path.join(process.ORIGINAL_DIR, main_path)
            # Frozen executables have no importable main module.
            if not main_path.endswith('.exe'):
                d['main_path'] = os.path.normpath(main_path)
    return d
# Install the patched implementation and enable support for frozen
# (py2exe-style) Windows executables.
forking.get_preparation_data = get_preparation_data
freeze_support()
| bsd-3-clause | 0e7d72990be7ca9146eb42e18445dc8f | 28.365854 | 73 | 0.587952 | 3.619186 | false | false | false | false |
quantmind/pulsar | tests/bench/test_wsgi.py | 1 | 1110 | import unittest
from pulsar.apps.wsgi import WsgiResponse
from pulsar.utils.lib import WsgiResponse as Wsgi
# Sample of common HTTP status codes mapped to their reason phrases.
# NOTE(review): not referenced by the benchmark class below -- possibly a
# leftover fixture; confirm before removing.
common = {
    200: 'OK',
    400: 'Bad Request',
    404: 'Not Found',
    500: 'Internal Server Error'
}
class TestWsgi(unittest.TestCase):
    """Micro-benchmarks comparing the pure-Python ``WsgiResponse`` with the
    C-accelerated one from ``pulsar.utils.lib``.

    NOTE(review): ``__benchmark__``/``__number__`` are presumably consumed by
    pulsar's benchmarking test runner (each test repeated ``__number__``
    times) — confirm against the runner's documentation.
    """
    # Flag this TestCase as a benchmark rather than a correctness test.
    __benchmark__ = True
    # Repetitions per benchmark method.
    __number__ = 20000
    def setUp(self):
        # Fresh, empty WSGI environ for every benchmark run.
        self.environ = {}
    def test_python(self):
        # Bare construction cost, pure-Python implementation.
        WsgiResponse(environ=self.environ, status_code=200)
    def test_cython(self):
        # Bare construction cost, C-accelerated implementation.
        Wsgi(environ=self.environ, status_code=200)
    def test_python_content(self):
        # Construction with a response body, pure Python.
        WsgiResponse(environ=self.environ, status_code=200,
                     content='this is a test')
    def test_cython_content(self):
        # Construction with a response body, C-accelerated.
        Wsgi(environ=self.environ, status_code=200, content='this is a test')
    def test_python_headers(self):
        # Construction plus header generation, pure Python.
        r = WsgiResponse(environ=self.environ, status_code=200,
                         content='this is a test')
        r.get_headers()
    def test_cython_headers(self):
        # Construction plus header generation, C-accelerated.
        r = Wsgi(environ=self.environ, status_code=200,
                 content='this is a test')
        r.get_headers()
| bsd-3-clause | fcbd7689b7d4ca611628b369e15b455f | 24.813953 | 77 | 0.617117 | 3.627451 | false | true | false | false |
healthchecks/healthchecks | hc/front/management/commands/pygmentize.py | 1 | 1907 | from __future__ import annotations
from django.core.management.base import BaseCommand
def _process(name, lexer):
    """Highlight ``templates/front/snippets/<name>.txt`` with *lexer* and
    write the HTML result to ``templates/front/snippets/<name>.html``.

    After highlighting, the placeholder tokens PING_URL, SITE_ROOT and
    PING_ENDPOINT are turned back into Django template variables so the
    generated snippet can be rendered per-check.
    """
    from pygments import highlight
    from pygments.formatters import HtmlFormatter

    # Use a context manager so the source file handle is always closed
    # (the previous version leaked the handle returned by open()).
    with open("templates/front/snippets/%s.txt" % name) as source_file:
        source = source_file.read()

    processed = highlight(source, lexer, HtmlFormatter())
    processed = processed.replace("PING_URL", "{{ ping_url }}")
    processed = processed.replace("SITE_ROOT", "{{ SITE_ROOT }}")
    processed = processed.replace("PING_ENDPOINT", "{{ PING_ENDPOINT }}")

    with open("templates/front/snippets/%s.html" % name, "w") as out:
        out.write(processed)
class Command(BaseCommand):
    """Management command that pre-renders code snippets with Pygments."""

    help = "Compiles snippets with Pygments"

    def handle(self, *args, **options):
        # Pygments is an optional dependency; bail out with install
        # instructions when it is missing.
        try:
            from pygments import lexers
        except ImportError:
            self.stdout.write("This command requires the Pygments package.")
            self.stdout.write("Please install it with:\n\n")
            self.stdout.write("  pip install Pygments\n\n")
            return

        # Snippet name -> lexer used to highlight it.
        snippets = [
            ("bash_curl", lexers.BashLexer()),
            ("bash_wget", lexers.BashLexer()),
            ("browser", lexers.JavascriptLexer()),
            ("cs", lexers.CSharpLexer()),
            ("node", lexers.JavascriptLexer()),
            ("go", lexers.GoLexer()),
            ("python_urllib2", lexers.PythonLexer()),
            ("python_requests", lexers.PythonLexer()),
            ("python_requests_fail", lexers.PythonLexer()),
            ("python_requests_start", lexers.PythonLexer()),
            ("python_requests_payload", lexers.PythonLexer()),
            ("php", lexers.PhpLexer(startinline=True)),
            ("powershell", lexers.shell.PowerShellLexer()),
            ("powershell_inline", lexers.shell.BashLexer()),
            ("ruby", lexers.RubyLexer()),
        ]
        for name, lexer in snippets:
            _process(name, lexer)
| bsd-3-clause | b99a275b83fb004fd054d0a80c4ebae8 | 39.574468 | 76 | 0.648663 | 4.014737 | false | false | false | false |
healthchecks/healthchecks | hc/api/migrations/0065_auto_20191127_1240.py | 2 | 1098 | # Generated by Django 2.2.6 on 2019-11-27 12:40
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add ``Check.methods`` and extend the ``Channel.kind`` choice list.

    Auto-generated by Django; the operation definitions below must stay
    exactly as generated so the migration state remains consistent.
    """
    dependencies = [
        ('api', '0064_auto_20191119_1346'),
    ]
    operations = [
        # New optional field restricting which HTTP methods a check accepts.
        migrations.AddField(
            model_name='check',
            name='methods',
            field=models.CharField(blank=True, max_length=30),
        ),
        # Refresh the full set of integration kinds available for channels.
        migrations.AlterField(
            model_name='channel',
            name='kind',
            field=models.CharField(choices=[('email', 'Email'), ('webhook', 'Webhook'), ('hipchat', 'HipChat'), ('slack', 'Slack'), ('pd', 'PagerDuty'), ('pagertree', 'PagerTree'), ('pagerteam', 'Pager Team'), ('po', 'Pushover'), ('pushbullet', 'Pushbullet'), ('opsgenie', 'OpsGenie'), ('victorops', 'VictorOps'), ('discord', 'Discord'), ('telegram', 'Telegram'), ('sms', 'SMS'), ('zendesk', 'Zendesk'), ('trello', 'Trello'), ('matrix', 'Matrix'), ('whatsapp', 'WhatsApp'), ('apprise', 'Apprise'), ('mattermost', 'Mattermost'), ('msteams', 'Microsoft Teams'), ('shell', 'Shell Command')], max_length=20),
        ),
    ]
| bsd-3-clause | 9603f442276822ab6582cc98d2586b60 | 46.73913 | 604 | 0.575592 | 3.43125 | false | false | false | false |
healthchecks/healthchecks | hc/front/tests/test_switch_channel.py | 1 | 2299 | from __future__ import annotations
from hc.accounts.models import Project
from hc.api.models import Channel, Check
from hc.test import BaseTestCase
class SwitchChannelTestCase(BaseTestCase):
    """Tests for the view that toggles a notification channel on a check."""

    def setUp(self):
        super().setUp()
        self.check = Check.objects.create(project=self.project)
        self.channel = Channel(project=self.project, kind="email")
        self.channel.value = "alice@example.org"
        self.channel.save()
        self.url = f"/checks/{self.check.code}/channels/{self.channel.code}/enabled"

    def _post(self, username, state):
        # Log in as the given user and POST the desired on/off state.
        self.client.login(username=username, password="password")
        return self.client.post(self.url, {"state": state})

    def test_it_enables(self):
        self._post("alice@example.org", "on")
        self.assertIn(self.channel, self.check.channel_set.all())

    def test_it_disables(self):
        self.check.channel_set.add(self.channel)
        self._post("alice@example.org", "off")
        self.assertNotIn(self.channel, self.check.channel_set.all())

    def test_it_checks_ownership(self):
        r = self._post("charlie@example.org", "on")
        self.assertEqual(r.status_code, 404)

    def test_it_checks_channels_ownership(self):
        charlies_project = Project.objects.create(owner=self.charlie)
        foreign_check = Check.objects.create(project=charlies_project)
        # Charlie tries to attach Alice's channel to his own check:
        self.url = f"/checks/{foreign_check.code}/channels/{self.channel.code}/enabled"
        r = self._post("charlie@example.org", "on")
        self.assertEqual(r.status_code, 400)

    def test_it_allows_cross_team_access(self):
        r = self._post("bob@example.org", "on")
        self.assertEqual(r.status_code, 200)

    def test_it_requires_rw_access(self):
        self.bobs_membership.role = "r"
        self.bobs_membership.save()
        r = self._post("bob@example.org", "on")
        self.assertEqual(r.status_code, 403)
| bsd-3-clause | ed01358442838aef4cd28c7d98244fb7 | 37.316667 | 84 | 0.661157 | 3.564341 | false | true | false | false |
healthchecks/healthchecks | hc/front/tests/test_add_pagertree.py | 1 | 1739 | from __future__ import annotations
from django.test.utils import override_settings
from hc.api.models import Channel
from hc.test import BaseTestCase
class AddPagerTreeTestCase(BaseTestCase):
    """Tests for the "add PagerTree integration" page."""

    def setUp(self):
        super().setUp()
        self.url = f"/projects/{self.project.code}/add_pagertree/"

    def test_instructions_work(self):
        self.client.login(username="alice@example.org", password="password")
        r = self.client.get(self.url)
        self.assertContains(r, "PagerTree")

    def test_it_works(self):
        self.client.login(username="alice@example.org", password="password")
        r = self.client.post(self.url, {"value": "http://example.org"})
        self.assertRedirects(r, self.channels_url)

        channel = Channel.objects.get()
        self.assertEqual(channel.kind, "pagertree")
        self.assertEqual(channel.value, "http://example.org")
        self.assertEqual(channel.project, self.project)

    def test_it_rejects_bad_url(self):
        self.client.login(username="alice@example.org", password="password")
        r = self.client.post(self.url, {"value": "not an URL"})
        self.assertContains(r, "Enter a valid URL")

    def test_it_requires_rw_access(self):
        self.bobs_membership.role = "r"
        self.bobs_membership.save()

        self.client.login(username="bob@example.org", password="password")
        self.assertEqual(self.client.get(self.url).status_code, 403)

    @override_settings(PAGERTREE_ENABLED=False)
    def test_it_handles_disabled_integration(self):
        self.client.login(username="alice@example.org", password="password")
        self.assertEqual(self.client.get(self.url).status_code, 404)
| bsd-3-clause | 8ffc3c9f7fcda1de8f0c8d41f2334fb3 | 33.78 | 76 | 0.655549 | 3.578189 | false | true | false | false |
healthchecks/healthchecks | hc/api/migrations/0027_auto_20161213_1059.py | 2 | 1514 | # -*- coding: utf-8 -*-
# Generated by Django 1.10.1 on 2016-12-13 10:59
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Introduce cron-style checks and refresh the channel kind choices.

    Auto-generated by Django; the operation definitions below must stay
    exactly as generated so the migration state remains consistent.
    """
    dependencies = [("api", "0026_auto_20160415_1824")]
    operations = [
        # Checks can now be either simple (period-based) or cron-scheduled.
        migrations.AddField(
            model_name="check",
            name="kind",
            field=models.CharField(
                choices=[("simple", "Simple"), ("cron", "Cron")],
                default="simple",
                max_length=10,
            ),
        ),
        # Cron expression used when kind == "cron".
        migrations.AddField(
            model_name="check",
            name="schedule",
            field=models.CharField(default="* * * * *", max_length=100),
        ),
        # Timezone in which the cron schedule is evaluated.
        migrations.AddField(
            model_name="check",
            name="tz",
            field=models.CharField(default="UTC", max_length=36),
        ),
        # Refresh the full set of integration kinds available for channels.
        migrations.AlterField(
            model_name="channel",
            name="kind",
            field=models.CharField(
                choices=[
                    ("email", "Email"),
                    ("webhook", "Webhook"),
                    ("hipchat", "HipChat"),
                    ("slack", "Slack"),
                    ("pd", "PagerDuty"),
                    ("po", "Pushover"),
                    ("pushbullet", "Pushbullet"),
                    ("opsgenie", "OpsGenie"),
                    ("victorops", "VictorOps"),
                ],
                max_length=20,
            ),
        ),
    ]
| bsd-3-clause | dea1f54332860962b573a94578efaae1 | 29.28 | 72 | 0.439894 | 4.519403 | false | false | false | false |
onepercentclub/onepercentclub-site | apps/vouchers/urlsapi.py | 1 | 1059 | from django.conf.urls import patterns
# All voucher API routes are currently disabled; the empty ``patterns('')``
# call keeps this module importable while the feature is switched off.
urlpatterns = patterns('',
    # Voucher code is disabled for now.
    #
    # surl(r'^orders/<order_pk:#>/vouchers/$', OrderVoucherList.as_view(), name='fund-order-voucher-list'),
    # surl(r'^orders/<order_pk:#>/vouchers/<pk:#>$', OrderVoucherDetail.as_view(), name='fund-order-voucher-detail'),
    #
    # url(r'^orders/current/vouchers/$', OrderVoucherList.as_view(), {'alias': 'current'}, name='fund-order-current-voucher-list'),
    # surl(r'^orders/current/vouchers/<pk:#>$', OrderVoucherDetail.as_view(), {'alias': 'current'}, name='fund-order-current-voucher-detail'),
    #
    # # Vouchers
    # surl(r'^vouchers/<code:s>$', VoucherDetail.as_view(), name='voucher-detail'),
    # surl(r'^vouchers/<code:s>/donations/$', VoucherDonationList.as_view(), name='voucher-donation-list'),
    # surl(r'^vouchers/<code:s>/donations/<pk:#>$', VoucherDonationDetail.as_view(), name='voucher-donation-list'),
    # surl(r'^customvouchers/$', CustomVoucherRequestList.as_view(), name='custom-voucher-request-list'),
)
| bsd-3-clause | 9ede93cdf5fa04335e6d4d8f3c4219b2 | 57.833333 | 142 | 0.668555 | 3.078488 | false | false | false | false |
onepercentclub/onepercentclub-site | apps/donations/wallposts.py | 1 | 2096 | from django.contrib.contenttypes.models import ContentType
from django.db.models.signals import post_save
from django.dispatch.dispatcher import receiver
from apps.fund.models import Donation, DonationStatuses
from bluebottle.wallposts.models import SystemWallPost
def _create_system_post(content_object, related_object, author):
    """Create and save a SystemWallPost linking *related_object* to the
    wall of *content_object*."""
    post = SystemWallPost()
    post.content_object = content_object
    post.related_object = related_object
    post.author = author
    post.ip = '127.0.0.1'
    post.save()


@receiver(post_save, weak=False, sender=Donation)
def create_donation_post(sender, instance, **kwargs):
    """Create wall post(s) for a donation once it is paid or pending.

    Voucher donations get a post on the project wall related to the voucher;
    fundraiser donations get a post on both the project wall and the
    fundraiser wall; other one-off donations get a post on the project wall.
    Nothing happens if a post for this donation already exists.
    """
    donation = instance
    if donation.status not in (DonationStatuses.paid, DonationStatuses.pending):
        return

    donation_type = ContentType.objects.get_for_model(donation)
    # Skip if a post was already created for this donation.  exists() asks
    # the database for a single row instead of fetching the whole queryset
    # as the previous len(...) check did.
    already_posted = SystemWallPost.objects.filter(
        related_id=donation.id, related_type=donation_type).exists()
    if already_posted:
        return

    if donation.donation_type not in (Donation.DonationTypes.one_off,
                                      Donation.DonationTypes.voucher):
        return

    if donation.voucher:
        _create_system_post(donation.project, donation.voucher, donation.user)
    elif donation.fundraiser:
        # Post on both the Project wall and the FundRaiser wall.
        _create_system_post(donation.project, donation, donation.user)
        _create_system_post(donation.fundraiser, donation, donation.user)
    else:
        _create_system_post(donation.project, donation, donation.user)
| bsd-3-clause | fa910593ded95419a750efaef6b38ded | 40.92 | 109 | 0.555821 | 4.39413 | false | false | false | false |
onepercentclub/onepercentclub-site | apps/fund/migrations/0008_payout_status.py | 1 | 32490 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import DataMigration
from django.db import models
class Migration(DataMigration):
    def forwards(self, orm):
        """Rename payout status values to the new vocabulary:
        completed -> settled, progress -> in_progress."""
        orm['payouts.ProjectPayout'].objects.filter(status='completed').update(status='settled')
        orm['payouts.ProjectPayout'].objects.filter(status='progress').update(status='in_progress')
        orm['payouts.OrganizationPayout'].objects.filter(status='completed').update(status='settled')
        orm['payouts.OrganizationPayout'].objects.filter(status='progress').update(status='in_progress')
    def backwards(self, orm):
        """Reverse the status rename: settled -> completed,
        in_progress -> progress."""
        orm['payouts.ProjectPayout'].objects.filter(status='settled').update(status='completed')
        orm['payouts.ProjectPayout'].objects.filter(status='in_progress').update(status='progress')
        orm['payouts.OrganizationPayout'].objects.filter(status='settled').update(status='completed')
        orm['payouts.OrganizationPayout'].objects.filter(status='in_progress').update(status='progress')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'bb_projects.projectphase': {
'Meta': {'ordering': "['sequence']", 'object_name': 'ProjectPhase'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '400', 'blank': 'True'}),
'editable': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'owner_editable': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'sequence': ('django.db.models.fields.IntegerField', [], {'unique': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '200'}),
'viewable': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
u'bb_projects.projecttheme': {
'Meta': {'ordering': "['name']", 'object_name': 'ProjectTheme'},
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'name_nl': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'fund.donation': {
'Meta': {'object_name': 'Donation'},
'amount': ('django.db.models.fields.PositiveIntegerField', [], {}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'currency': ('django.db.models.fields.CharField', [], {'default': "'EUR'", 'max_length': '3'}),
'donation_type': ('django.db.models.fields.CharField', [], {'default': "'one_off'", 'max_length': '20', 'db_index': 'True'}),
'fundraiser': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'old_donations'", 'null': 'True', 'to': u"orm['fundraisers.FundRaiser']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'order': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'donations'", 'null': 'True', 'to': u"orm['fund.Order']"}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'old_donations'", 'to': u"orm['projects.Project']"}),
'ready': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'new'", 'max_length': '20', 'db_index': 'True'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['members.Member']", 'null': 'True', 'blank': 'True'}),
'voucher': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['vouchers.Voucher']", 'null': 'True', 'blank': 'True'})
},
u'fund.order': {
'Meta': {'ordering': "('-updated',)", 'object_name': 'Order'},
'closed': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'order_number': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30', 'db_index': 'True'}),
'recurring': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'current'", 'max_length': '20', 'db_index': 'True'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'old_orders'", 'null': 'True', 'to': u"orm['members.Member']"})
},
u'fundraisers.fundraiser': {
'Meta': {'object_name': 'FundRaiser'},
'amount': ('django.db.models.fields.DecimalField', [], {'max_digits': '10', 'decimal_places': '2'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'currency': ('django.db.models.fields.CharField', [], {'default': "'EUR'", 'max_length': "'10'"}),
'deadline': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'deleted': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['members.Member']"}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['projects.Project']"}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'video_url': ('django.db.models.fields.URLField', [], {'default': "''", 'max_length': '100', 'blank': 'True'})
},
u'geo.country': {
'Meta': {'ordering': "['name']", 'object_name': 'Country'},
'alpha2_code': ('django.db.models.fields.CharField', [], {'max_length': '2', 'blank': 'True'}),
'alpha3_code': ('django.db.models.fields.CharField', [], {'max_length': '3', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'numeric_code': ('django.db.models.fields.CharField', [], {'max_length': '3', 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'oda_recipient': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'subregion': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['geo.SubRegion']"})
},
u'geo.region': {
'Meta': {'ordering': "['name']", 'object_name': 'Region'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'numeric_code': ('django.db.models.fields.CharField', [], {'max_length': '3', 'unique': 'True', 'null': 'True', 'blank': 'True'})
},
u'geo.subregion': {
'Meta': {'ordering': "['name']", 'object_name': 'SubRegion'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'numeric_code': ('django.db.models.fields.CharField', [], {'max_length': '3', 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'region': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['geo.Region']"})
},
u'members.member': {
'Meta': {'object_name': 'Member'},
'about': ('django.db.models.fields.TextField', [], {'max_length': '265', 'blank': 'True'}),
'available_time': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'birthdate': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'deleted': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'disable_token': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'unique': 'True', 'max_length': '254', 'db_index': 'True'}),
'facebook': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'gender': ('django.db.models.fields.CharField', [], {'max_length': '6', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'location': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'newsletter': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'phone_number': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'picture': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'blank': 'True'}),
'primary_language': ('django.db.models.fields.CharField', [], {'max_length': '5'}),
'share_money': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'share_time_knowledge': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'skypename': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
'twitter': ('django.db.models.fields.CharField', [], {'max_length': '15', 'blank': 'True'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'user_type': ('django.db.models.fields.CharField', [], {'default': "'person'", 'max_length': '25'}),
'username': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'}),
'website': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'why': ('django.db.models.fields.TextField', [], {'max_length': '265', 'blank': 'True'})
},
u'organizations.organization': {
'Meta': {'ordering': "['name']", 'object_name': 'Organization'},
'account_bank_address': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'account_bank_city': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'account_bank_country': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'account_bank_country'", 'null': 'True', 'to': u"orm['geo.Country']"}),
'account_bank_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'account_bank_postal_code': ('django.db.models.fields.CharField', [], {'max_length': '20', 'blank': 'True'}),
'account_bic': ('django_iban.fields.SWIFTBICField', [], {'max_length': '11', 'blank': 'True'}),
'account_holder_address': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'account_holder_city': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'account_holder_country': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'account_holder_country'", 'null': 'True', 'to': u"orm['geo.Country']"}),
'account_holder_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'account_holder_postal_code': ('django.db.models.fields.CharField', [], {'max_length': '20', 'blank': 'True'}),
'account_iban': ('django_iban.fields.IBANField', [], {'max_length': '34', 'blank': 'True'}),
'account_number': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'account_other': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'address_line1': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'address_line2': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'city': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'country': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'country'", 'null': 'True', 'to': u"orm['geo.Country']"}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'deleted': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'facebook': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'partner_organizations': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'phone_number': ('django.db.models.fields.CharField', [], {'max_length': '40', 'blank': 'True'}),
'postal_code': ('django.db.models.fields.CharField', [], {'max_length': '20', 'blank': 'True'}),
'registration': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'skype': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'twitter': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'website': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'})
},
u'payouts.organizationpayout': {
'Meta': {'ordering': "['start_date']", 'unique_together': "(('start_date', 'end_date'),)", 'object_name': 'OrganizationPayout'},
'completed': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'end_date': ('django.db.models.fields.DateField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'invoice_reference': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'organization_fee_excl': ('bluebottle.bb_projects.fields.MoneyField', [], {'max_digits': '12', 'decimal_places': '2'}),
'organization_fee_incl': ('bluebottle.bb_projects.fields.MoneyField', [], {'max_digits': '12', 'decimal_places': '2'}),
'organization_fee_vat': ('bluebottle.bb_projects.fields.MoneyField', [], {'max_digits': '12', 'decimal_places': '2'}),
'other_costs_excl': ('bluebottle.bb_projects.fields.MoneyField', [], {'default': "'0.00'", 'max_digits': '12', 'decimal_places': '2'}),
'other_costs_incl': ('bluebottle.bb_projects.fields.MoneyField', [], {'default': "'0.00'", 'max_digits': '12', 'decimal_places': '2'}),
'other_costs_vat': ('bluebottle.bb_projects.fields.MoneyField', [], {'default': "'0.00'", 'max_digits': '12', 'decimal_places': '2'}),
'payable_amount_excl': ('bluebottle.bb_projects.fields.MoneyField', [], {'max_digits': '12', 'decimal_places': '2'}),
'payable_amount_incl': ('bluebottle.bb_projects.fields.MoneyField', [], {'max_digits': '12', 'decimal_places': '2'}),
'payable_amount_vat': ('bluebottle.bb_projects.fields.MoneyField', [], {'max_digits': '12', 'decimal_places': '2'}),
'planned': ('django.db.models.fields.DateField', [], {}),
'psp_fee_excl': ('bluebottle.bb_projects.fields.MoneyField', [], {'max_digits': '12', 'decimal_places': '2'}),
'psp_fee_incl': ('bluebottle.bb_projects.fields.MoneyField', [], {'max_digits': '12', 'decimal_places': '2'}),
'psp_fee_vat': ('bluebottle.bb_projects.fields.MoneyField', [], {'max_digits': '12', 'decimal_places': '2'}),
'start_date': ('django.db.models.fields.DateField', [], {}),
'status': ('django.db.models.fields.CharField', [], {'default': "'new'", 'max_length': '20'}),
'submitted': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'})
},
u'payouts.projectpayout': {
'Meta': {'ordering': "['-created']", 'object_name': 'ProjectPayout'},
'amount_payable': ('bluebottle.bb_projects.fields.MoneyField', [], {'max_digits': '12', 'decimal_places': '2'}),
'amount_raised': ('bluebottle.bb_projects.fields.MoneyField', [], {'max_digits': '12', 'decimal_places': '2'}),
'completed': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'description_line1': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '100', 'blank': 'True'}),
'description_line2': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '100', 'blank': 'True'}),
'description_line3': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '100', 'blank': 'True'}),
'description_line4': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '100', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'invoice_reference': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'organization_fee': ('bluebottle.bb_projects.fields.MoneyField', [], {'max_digits': '12', 'decimal_places': '2'}),
'payout_rule': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'planned': ('django.db.models.fields.DateField', [], {}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['projects.Project']"}),
'receiver_account_bic': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'receiver_account_city': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'receiver_account_country': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True'}),
'receiver_account_iban': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'receiver_account_name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'receiver_account_number': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'sender_account_number': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'new'", 'max_length': '20'}),
'submitted': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'})
},
u'projects.partnerorganization': {
'Meta': {'object_name': 'PartnerOrganization'},
'description': ('django.db.models.fields.TextField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100'})
},
u'projects.project': {
'Meta': {'ordering': "['title']", 'object_name': 'Project'},
'allow_overfunding': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'amount_asked': ('bluebottle.bb_projects.fields.MoneyField', [], {'default': '0', 'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'amount_donated': ('bluebottle.bb_projects.fields.MoneyField', [], {'default': '0', 'max_digits': '12', 'decimal_places': '2'}),
'amount_needed': ('bluebottle.bb_projects.fields.MoneyField', [], {'default': '0', 'max_digits': '12', 'decimal_places': '2'}),
'campaign_ended': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'campaign_funded': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'campaign_started': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'country': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['geo.Country']", 'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'date_submitted': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'deadline': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'effects': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'favorite': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'for_who': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'future': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '255', 'blank': 'True'}),
'is_campaign': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'language': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['utils.Language']", 'null': 'True', 'blank': 'True'}),
'latitude': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '21', 'decimal_places': '18', 'blank': 'True'}),
'longitude': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '21', 'decimal_places': '18', 'blank': 'True'}),
'mchanga_account': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'organization': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'organization'", 'null': 'True', 'to': u"orm['organizations.Organization']"}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'owner'", 'to': u"orm['members.Member']"}),
'partner_organization': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['projects.PartnerOrganization']", 'null': 'True', 'blank': 'True'}),
'pitch': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'popularity': ('django.db.models.fields.FloatField', [], {'default': '0'}),
'reach': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'skip_monthly': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100'}),
'status': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['bb_projects.ProjectPhase']"}),
'story': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'theme': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['bb_projects.ProjectTheme']", 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'video_url': ('django.db.models.fields.URLField', [], {'default': "''", 'max_length': '100', 'null': 'True', 'blank': 'True'})
},
u'taggit.tag': {
'Meta': {'object_name': 'Tag'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100'})
},
u'taggit.taggeditem': {
'Meta': {'object_name': 'TaggedItem'},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'taggit_taggeditem_tagged_items'", 'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'}),
'tag': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'taggit_taggeditem_items'", 'to': u"orm['taggit.Tag']"})
},
u'utils.language': {
'Meta': {'ordering': "['language_name']", 'object_name': 'Language'},
'code': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language_name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'native_name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'vouchers.voucher': {
'Meta': {'object_name': 'Voucher'},
'amount': ('django.db.models.fields.PositiveIntegerField', [], {}),
'code': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '100', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'currency': ('django.db.models.fields.CharField', [], {'default': "'EUR'", 'max_length': '3'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'default': "'en'", 'max_length': '2'}),
'message': ('django.db.models.fields.TextField', [], {'default': "''", 'max_length': '500', 'blank': 'True'}),
'order': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'vouchers'", 'null': 'True', 'to': u"orm['fund.Order']"}),
'receiver': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'receiver'", 'null': 'True', 'to': u"orm['members.Member']"}),
'receiver_email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'receiver_name': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '100', 'blank': 'True'}),
'sender': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'sender'", 'null': 'True', 'to': u"orm['members.Member']"}),
'sender_email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'sender_name': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '100', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'new'", 'max_length': '20', 'db_index': 'True'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'})
}
}
complete_apps = ['payouts', 'fund']
symmetrical = True
| bsd-3-clause | 74d1ff664b6bac7aef2f9063a94ec4d9 | 93.723032 | 197 | 0.557495 | 3.586093 | false | false | false | false |
onepercentclub/onepercentclub-site | apps/bluebottle_salesforce/sync.py | 1 | 33097 | import logging
from apps.recurring_donations.models import MonthlyDonor
from bluebottle.payments.models import OrderPayment
import re
from django.utils import timezone
from registration.models import RegistrationProfile
from apps.cowry_docdata.models import payment_method_mapping
from apps.projects.models import ProjectBudgetLine
from apps.organizations.models import Organization, OrganizationMember
from apps.tasks.models import Task, TaskMember
from bluebottle.donations.models import Donation
from apps.vouchers.models import Voucher, VoucherStatuses
from bluebottle.fundraisers.models import FundRaiser
from apps.projects.models import Project
from apps.members.models import Member
from apps.bluebottle_salesforce.models import SalesforceOrganization, SalesforceContact, SalesforceProject, \
SalesforceDonation, SalesforceProjectBudget, SalesforceTask, SalesforceTaskMembers, SalesforceVoucher, \
SalesforceLogItem, SalesforceFundraiser, SalesforceOrganizationMember
logger = logging.getLogger('bluebottle.salesforce')
re_email = re.compile("^[A-Z0-9._%+-/!#$%&'*=?^_`{|}~]+@[A-Z0-9.-]+\\.[A-Z]{2,4}$")
def sync_organizations(dry_run, sync_from_datetime, loglevel):
    """Mirror Organization records into their Salesforce counterparts.

    For each Organization (optionally limited to those updated since
    ``sync_from_datetime``) the matching SalesforceOrganization is looked
    up by ``external_id`` — or a fresh one is created — and all mapped
    fields are overwritten from the source record.

    Args:
        dry_run: when True, map the data but skip the Salesforce save.
        sync_from_datetime: if set, only sync records updated since then.
        loglevel: logging level applied to the module logger.

    Returns:
        Tuple ``(success_count, error_count)``.
    """
    logger.setLevel(loglevel)
    success_count, error_count = 0, 0

    organizations = Organization.objects.all()
    if sync_from_datetime:
        organizations = organizations.filter(updated__gte=sync_from_datetime)

    logger.info("Syncing {0} Organization objects.".format(organizations.count()))

    for org in organizations:
        logger.debug("Syncing Organization: {0}".format(org.id))

        # Re-use the existing Salesforce record when there is one; any
        # unexpected lookup failure aborts the whole run.
        try:
            sf_org = SalesforceOrganization.objects.get(external_id=org.id)
        except SalesforceOrganization.DoesNotExist:
            sf_org = SalesforceOrganization()
        except Exception as e:
            logger.error("Error while loading sforganization id {0} - stopping: ".format(org.id) + str(e))
            return success_count, error_count + 1

        # Profile / billing address (city and state are length-limited).
        sf_org.name = org.name
        sf_org.billing_city = org.city[:40]
        sf_org.billing_street = org.address_line1 + " " + org.address_line2
        sf_org.billing_postal_code = org.postal_code
        sf_org.billing_state = org.state[:20]
        sf_org.billing_country = org.country.name if org.country else ''

        # Only push a syntactically valid e-mail address; log and skip otherwise.
        if org.email and re_email.match(org.email.upper()):
            sf_org.email_address = org.email
        elif org.email:
            logger.error("Organization has invalid e-mail address '{0}', org id {1}. "
                         "Ignoring e-mail address field.".format(org.email, org.id))

        # Contact / social fields.
        sf_org.phone = org.phone_number
        sf_org.website = org.website
        sf_org.twitter = org.twitter
        sf_org.facebook = org.facebook
        sf_org.skype = org.skype

        # Tags are flattened into one comma-separated string; each tag is
        # prepended, so the result ends with ", " and lists tags in reverse
        # iteration order.
        sf_org.tags = ""
        for tag in org.tags.all():
            sf_org.tags = str(tag) + ", " + sf_org.tags

        # Bank account holder details.
        sf_org.bank_account_name = org.account_holder_name
        sf_org.bank_account_address = org.account_holder_address
        sf_org.bank_account_postalcode = org.account_holder_postal_code
        sf_org.bank_account_city = org.account_holder_city
        sf_org.bank_account_country = org.account_holder_country.name if org.account_holder_country else ''

        # Bank branch details.
        sf_org.bank_name = org.account_bank_name
        sf_org.bank_address = org.account_bank_address
        sf_org.bank_postalcode = org.account_bank_postal_code
        sf_org.bank_city = org.account_bank_city
        sf_org.bank_country = org.account_bank_country.name if org.account_bank_country else ''
        sf_org.bank_account_number = org.account_number
        sf_org.bank_bic_swift = org.account_bic
        sf_org.bank_account_iban = org.account_iban

        # Bookkeeping fields.
        sf_org.external_id = org.id
        sf_org.created_date = org.created
        sf_org.deleted_date = org.deleted

        if not dry_run:
            try:
                sf_org.save()
                success_count += 1
            except Exception as e:
                error_count += 1
                logger.error("Error while saving organization id {0}: ".format(org.id) + str(e))

    return success_count, error_count
def sync_users(dry_run, sync_from_datetime, loglevel):
    """Mirror Member records into Salesforce Contact records.

    Each Member (optionally limited to those updated since
    ``sync_from_datetime``) is matched to a SalesforceContact by
    ``external_id``; a new contact is created when none exists.

    :param dry_run: when True, map the data but skip the Salesforce save.
    :param sync_from_datetime: if set, only sync records updated since then.
    :param loglevel: logging level applied to the module logger.
    :returns: tuple ``(success_count, error_count)``.
    """
    logger.setLevel(loglevel)
    error_count = 0
    success_count = 0
    users = Member.objects.all()
    if sync_from_datetime:
        users = users.filter(updated__gte=sync_from_datetime)
    logger.info("Syncing {0} User objects.".format(users.count()))
    for user in users:
        logger.debug("Syncing User: {0}".format(user.id))
        # Find the corresponding SF user; an unexpected lookup failure
        # aborts the whole run.
        try:
            contact = SalesforceContact.objects.get(external_id=user.id)
        except SalesforceContact.DoesNotExist:
            contact = SalesforceContact()
        except Exception as e:
            logger.error("Error while loading sfcontact id {0} - stopping: ".format(user.id) + str(e))
            return success_count, error_count+1
        # Populate the data from the source
        contact.external_id = user.id
        contact.user_name = user.username
        # Unlike sync_organizations there is no emptiness guard, so an empty
        # e-mail also falls through to the error branch below.
        if re_email.match(user.email.upper()):
            contact.email = user.email
        else:
            logger.error("User has invalid e-mail address '{0}', member id {1}. "
                         "Ignoring e-mail address field.".format(user.email, user.id))
        contact.is_active = user.is_active
        contact.member_since = user.date_joined
        contact.date_joined = user.date_joined
        contact.deleted = user.deleted
        contact.category1 = Member.UserType.values[user.user_type].title()
        contact.first_name = user.first_name
        # Salesforce requires a last name; fall back to a placeholder.
        if user.last_name.strip():
            contact.last_name = user.last_name
        else:
            contact.last_name = "1%MEMBER"
        contact.location = user.location
        contact.website = user.website
        contact.picture_location = ""
        if user.picture:
            contact.picture_location = str(user.picture)
        contact.about_me_us = user.about
        contact.why_one_percent_member = user.why
        contact.availability = user.available_time
        contact.facebook = user.facebook
        contact.twitter = user.twitter
        contact.skype = user.skypename
        contact.primary_language = user.primary_language
        contact.receive_newsletter = user.newsletter
        contact.phone = user.phone_number
        contact.birth_date = user.birthdate
        if user.gender == "male":
            contact.gender = Member.Gender.values['male'].title()
        elif user.gender == "female":
            contact.gender = Member.Gender.values['female'].title()
        else:
            contact.gender = ""
        # Tags are flattened into one comma-separated string; each tag is
        # prepended, so the result lists tags in reverse iteration order.
        contact.tags = ""
        for tag in user.tags.all():
            contact.tags = str(tag) + ", " + contact.tags
        if user.address:
            contact.mailing_city = user.address.city
            contact.mailing_street = user.address.line1 + ' ' + user.address.line2
            if user.address.country:
                contact.mailing_country = user.address.country.name
            else:
                contact.mailing_country = ''
            contact.mailing_postal_code = user.address.postal_code
            contact.mailing_state = user.address.state
        else:
            contact.mailing_city = ''
            contact.mailing_street = ''
            contact.mailing_country = ''
            contact.mailing_postal_code = ''
            contact.mailing_state = ''
        # Determine if the user has activated himself, by default assume not
        # if this is a legacy record, by default assume it has activated
        contact.has_activated = False
        try:
            rp = RegistrationProfile.objects.get(user_id=user.id)
            # NOTE(review): this overwrites the tag list built above with the
            # raw activation key — looks like leftover debugging; confirm
            # whether Salesforce really expects the key in the tags field.
            contact.tags = rp.activation_key
            if rp.activation_key == RegistrationProfile.ACTIVATED:
                contact.has_activated = True
        except RegistrationProfile.DoesNotExist:
            # No registration profile: legacy account. Heuristic — a user who
            # never logged in after joining and is inactive is not activated.
            if not user.is_active and user.date_joined == user.last_login:
                contact.has_activated = False
            else:
                contact.has_activated = True
        contact.last_login = user.last_login
        # Bank details of recurring payments
        try:
            monthly_donor = MonthlyDonor.objects.get(user=user)
            contact.bank_account_city = monthly_donor.city
            contact.bank_account_holder = monthly_donor.name
            contact.bank_account_number = ''
            contact.bank_account_iban = monthly_donor.iban
            contact.bank_account_active_recurring_debit = monthly_donor.active
        except MonthlyDonor.DoesNotExist:
            # Not a monthly donor: clear all recurring-payment fields.
            contact.bank_account_city = ''
            contact.bank_account_holder = ''
            contact.bank_account_number = ''
            contact.bank_account_iban = ''
            contact.bank_account_active_recurring_debit = False
        # Save the object to Salesforce
        if not dry_run:
            try:
                contact.save()
                success_count += 1
            except Exception as e:
                error_count += 1
                logger.error("Error while saving contact id {0}: ".format(user.id) + str(e))
    return success_count, error_count
def sync_projects(dry_run, sync_from_datetime, loglevel):
    """Mirror Project records into Salesforce project records.

    Each Project (optionally limited to those updated since
    ``sync_from_datetime``) is matched to a SalesforceProject by
    ``external_id``; a new record is created when none exists.

    :param dry_run: when True, map the data but skip the Salesforce save.
    :param sync_from_datetime: if set, only sync records updated since then.
    :param loglevel: logging level applied to the module logger.
    :returns: tuple ``(success_count, error_count)``.
    """
    logger.setLevel(loglevel)
    error_count = 0
    success_count = 0
    projects = Project.objects.all()
    if sync_from_datetime:
        projects = projects.filter(updated__gte=sync_from_datetime)
    logger.info("Syncing {0} Project objects.".format(projects.count()))
    for project in projects:
        logger.debug("Syncing Project: {0}".format(project.id))
        # Find the corresponding SF project; an unexpected lookup failure
        # aborts the whole run.
        try:
            sfproject = SalesforceProject.objects.get(external_id=project.id)
        except SalesforceProject.DoesNotExist:
            sfproject = SalesforceProject()
        except Exception as e:
            logger.error("Error while loading sfproject id {0} - stopping: ".format(project.id) + str(e))
            return success_count, error_count+1
        # Populate the data
        sfproject.external_id = project.id
        sfproject.project_name = project.title
        sfproject.describe_the_project_in_one_sentence = project.pitch[:5000]
        sfproject.video_url = project.video_url
        sfproject.date_project_deadline = project.deadline or None
        sfproject.is_campaign = project.is_campaign
        # Monetary amounts are formatted as "x.yy" strings for Salesforce.
        sfproject.amount_at_the_moment = "%01.2f" % (project.amount_donated or 0)
        sfproject.amount_requested = "%01.2f" % (project.amount_asked or 0)
        sfproject.amount_still_needed = "%01.2f" % (project.amount_needed or 0)
        sfproject.allow_overfunding = project.allow_overfunding
        sfproject.story = project.story
        sfproject.date_plan_submitted = project.date_submitted
        sfproject.date_started = project.campaign_started
        sfproject.date_ended = project.campaign_ended
        sfproject.date_funded = project.campaign_funded
        sfproject.picture_location = ""
        if project.image:
            sfproject.picture_location = str(project.image)
        # Cross-references are resolved best-effort: a missing contact or
        # organization is logged but does not stop the sync.
        try:
            sfproject.project_owner = SalesforceContact.objects.get(external_id=project.owner.id)
        except SalesforceContact.DoesNotExist:
            logger.error("Unable to find contact id {0} in Salesforce for project id {1}".format(project.owner.id,
                                                                                                 project.id))
        if project.organization:
            try:
                sfproject.organization_account = SalesforceOrganization.objects.get(
                    external_id=project.organization_id)
            except SalesforceOrganization.DoesNotExist:
                logger.error("Unable to find organization id {0} in Salesforce for project id {1}".format(
                    project.organization.id, project.id))
        # NOTE(review): assumes every country has a subregion with a region —
        # a country without one would raise AttributeError here; confirm the
        # geo model guarantees this chain.
        if project.country:
            sfproject.country_in_which_the_project_is_located = project.country.name.encode("utf-8")
            sfproject.sub_region = project.country.subregion.name.encode("utf-8")
            sfproject.region = project.country.subregion.region.name.encode("utf-8")
        sfproject.theme = ""
        if project.theme:
            sfproject.theme = project.theme.name
        if project.status:
            sfproject.status_project = project.status.name.encode("utf-8")
        sfproject.project_created_date = project.created
        sfproject.project_updated_date = project.updated
        # Tags are flattened into one comma-separated string (each tag is
        # prepended) and truncated to the 255-character Salesforce limit.
        sfproject.tags = ""
        for tag in project.tags.all():
            sfproject.tags = str(tag) + ", " + sfproject.tags
        sfproject.tags = sfproject.tags[:255]
        sfproject.partner_organization = "-"
        if project.partner_organization:
            sfproject.partner_organization = project.partner_organization.name
        sfproject.slug = project.slug
        # NOTE(review): donation_total and donation_oo_total are computed from
        # the same get_money_total(['paid', 'pending']) call — confirm whether
        # the "oo" variant should use a different status filter.
        sfproject.donation_total = "%01.2f" % (project.get_money_total(['paid', 'pending']))
        sfproject.donation_oo_total = "%01.2f" % (project.get_money_total(['paid', 'pending']))
        sfproject.supporter_count = project.supporters_count()
        sfproject.supporter_oo_count = project.supporters_count(True)
        # Save the object to Salesforce
        if not dry_run:
            try:
                sfproject.save()
                success_count += 1
            except Exception as e:
                error_count += 1
                logger.error("Error while saving project id {0}: ".format(project.id) + str(e))
    return success_count, error_count
def sync_fundraisers(dry_run, sync_from_datetime, loglevel):
    """Mirror FundRaiser records into Salesforce fundraiser records.

    Each FundRaiser (optionally limited to those updated since
    ``sync_from_datetime``) is matched to a SalesforceFundraiser by
    ``external_id``; a new record is created when none exists.

    Args:
        dry_run: when True, map the data but skip the Salesforce save.
        sync_from_datetime: if set, only sync records updated since then.
        loglevel: logging level applied to the module logger.

    Returns:
        Tuple ``(success_count, error_count)``.
    """
    logger.setLevel(loglevel)
    success_count, error_count = 0, 0

    fundraisers = FundRaiser.objects.all()
    if sync_from_datetime:
        fundraisers = fundraisers.filter(updated__gte=sync_from_datetime)

    logger.info("Syncing {0} Fundraiser objects.".format(fundraisers.count()))

    for fundraiser in fundraisers:
        logger.debug("Syncing Fundraiser: {0}".format(fundraiser.id))

        # Re-use the existing Salesforce record when there is one; any
        # unexpected lookup failure aborts the whole run.
        try:
            sf_fundraiser = SalesforceFundraiser.objects.get(external_id=fundraiser.id)
        except SalesforceFundraiser.DoesNotExist:
            sf_fundraiser = SalesforceFundraiser()
        except Exception as e:
            logger.error("Error while loading sffundraiser id {0} - stopping: ".format(fundraiser.id) + str(e))
            return success_count, error_count + 1

        sf_fundraiser.external_id = fundraiser.id

        # Cross-references are resolved best-effort: missing records are
        # logged but do not stop the sync.
        try:
            sf_fundraiser.owner = SalesforceContact.objects.get(external_id=fundraiser.owner.id)
        except SalesforceContact.DoesNotExist:
            logger.error("Unable to find contact id {0} in Salesforce for fundraiser id {1}".format(fundraiser.owner.id,
                                                                                                    fundraiser.id))
        try:
            sf_fundraiser.project = SalesforceProject.objects.get(external_id=fundraiser.project.id)
        except SalesforceProject.DoesNotExist:
            logger.error("Unable to find project id {0} in Salesforce for fundraiser id {1}".format(
                fundraiser.project.id, fundraiser.id))

        sf_fundraiser.picture_location = str(fundraiser.image) if fundraiser.image else ""
        sf_fundraiser.name = fundraiser.title[:80]
        sf_fundraiser.description = fundraiser.description
        sf_fundraiser.video_url = fundraiser.video_url

        # Amounts are kept in cents locally; Salesforce gets "x.yy" strings.
        sf_fundraiser.amount = '%01.2f' % (float(fundraiser.amount) / 100)
        sf_fundraiser.amount_at_the_moment = '%01.2f' % (float(fundraiser.amount_donated) / 100)

        sf_fundraiser.deadline = fundraiser.deadline.date()
        sf_fundraiser.created = fundraiser.created

        if not dry_run:
            try:
                sf_fundraiser.save()
                success_count += 1
            except Exception as e:
                error_count += 1
                logger.error("Error while saving fundraiser id {0}: ".format(fundraiser.id) + str(e))

    return success_count, error_count
def sync_projectbudgetlines(dry_run, sync_from_datetime, loglevel):
    """Mirror ProjectBudgetLine records into Salesforce budget records.

    Each budget line (optionally limited to those updated since
    ``sync_from_datetime``) is matched to a SalesforceProjectBudget by
    ``external_id``; a new record is created when none exists.

    Args:
        dry_run: when True, map the data but skip the Salesforce save.
        sync_from_datetime: if set, only sync records updated since then.
        loglevel: logging level applied to the module logger.

    Returns:
        Tuple ``(success_count, error_count)``.
    """
    logger.setLevel(loglevel)
    success_count, error_count = 0, 0

    budget_lines = ProjectBudgetLine.objects.all()
    if sync_from_datetime:
        budget_lines = budget_lines.filter(updated__gte=sync_from_datetime)

    logger.info("Syncing {0} BudgetLine objects.".format(budget_lines.count()))

    for line in budget_lines:
        logger.debug("Syncing BudgetLine: {0}".format(line.id))

        # Re-use the existing Salesforce record when there is one; any
        # unexpected lookup failure aborts the whole run.
        try:
            sf_line = SalesforceProjectBudget.objects.get(external_id=line.id)
        except SalesforceProjectBudget.DoesNotExist:
            sf_line = SalesforceProjectBudget()
        except Exception as e:
            logger.error("Error while loading sfbudget_line id {0} - stopping: ".format(line.id) + str(e))
            return success_count, error_count + 1

        # Amount is stored in cents; Salesforce receives a "x.yy" string.
        # (The / 100 division is kept exactly as-is: under Python 2 an
        # integer amount divides integrally.)
        sf_line.costs = "%01.2f" % (line.amount / 100)
        sf_line.description = line.description
        sf_line.external_id = line.id

        # Resolve the owning project best-effort; a miss is logged only.
        try:
            sf_line.project = SalesforceProject.objects.get(external_id=line.project.id)
        except SalesforceProject.DoesNotExist:
            logger.error("Unable to find project id {0} in Salesforce for budget line id {1}".format(
                line.project.id, line.id))

        if not dry_run:
            try:
                sf_line.save()
                success_count += 1
            except Exception as e:
                error_count += 1
                logger.error("Error while saving budget line id {0}: ".format(line.id) + str(e))

    return success_count, error_count
def sync_donations(dry_run, sync_from_datetime, loglevel):
    """Mirror Donation records into Salesforce donation records.

    Each Donation (optionally limited to those updated since
    ``sync_from_datetime``) is matched to a SalesforceDonation by
    ``external_id_donation``; a new record is created when none exists.

    :param dry_run: when True, map the data but skip the Salesforce save.
    :param sync_from_datetime: if set, only sync records updated since then.
    :param loglevel: logging level applied to the module logger.
    :returns: tuple ``(success_count, error_count)``.
    """
    logger.setLevel(loglevel)
    error_count = 0
    success_count = 0
    donations = Donation.objects.all()
    if sync_from_datetime:
        donations = donations.filter(updated__gte=sync_from_datetime)
    logger.info("Syncing {0} Donation objects.".format(donations.count()))
    for donation in donations:
        logger.debug("Syncing Donation: {0}".format(donation.id))
        # Find the corresponding SF donation; an unexpected lookup failure
        # aborts the whole run.
        try:
            sfdonation = SalesforceDonation.objects.get(external_id_donation=donation.id)
        except SalesforceDonation.DoesNotExist:
            sfdonation = SalesforceDonation()
        except Exception as e:
            logger.error("Error while loading sfdonation id {0} - stopping: ".format(donation.id) + str(e))
            return success_count, error_count+1
        # Populate the data
        sfdonation.external_id_donation = donation.id
        sfdonation.amount = "%01.2f" % donation.amount
        # NOTE(review): the guard checks donation.user but the lookup uses
        # donation.order.user — presumably these are the same account, but if
        # they can differ (or order is None) this breaks; confirm the model
        # invariant.
        if donation.user:
            try:
                sfdonation.donor = SalesforceContact.objects.get(external_id=donation.order.user.id)
            except SalesforceContact.DoesNotExist:
                logger.error("Unable to find contact id {0} in Salesforce for donation id {1}".format(
                    donation.order.user.id, donation.id))
        # Project and fundraiser references are resolved best-effort:
        # a miss is logged but does not stop the sync.
        if donation.project:
            try:
                sfdonation.project = SalesforceProject.objects.get(external_id=donation.project.id)
            except SalesforceProject.DoesNotExist:
                logger.error("Unable to find project id {0} in Salesforce for donation id {1}".format(
                    donation.project.id, donation.id))
        if donation.fundraiser:
            try:
                sfdonation.fundraiser = SalesforceFundraiser.objects.get(external_id=donation.fundraiser.id)
            except SalesforceFundraiser.DoesNotExist:
                logger.error("Unable to find fundraiser id {0} in Salesforce for donation id {1}".format(
                    donation.fundraiser.id, donation.id))
        sfdonation.stage_name = donation.order.get_status_display()
        sfdonation.close_date = donation.created
        sfdonation.donation_created_date = donation.created
        sfdonation.donation_updated_date = donation.updated
        sfdonation.donation_ready_date = donation.completed or None
        sfdonation.type = donation.order.order_type
        # Display name: the order user's full name when present, else a
        # placeholder (same user/order.user mismatch as above).
        if donation.user and donation.order.user.get_full_name() != '':
            sfdonation.name = donation.order.user.get_full_name()
        else:
            sfdonation.name = "Anonymous"
        # Fixed Salesforce record-type id for donations.
        sfdonation.record_type = "012A0000000ZK6FIAW"
        # Get the payment method from the associated order / payment
        sfdonation.payment_method = payment_method_mapping[''] # Maps to Unknown for DocData.
        if donation.order:
            lp = OrderPayment.get_latest_by_order(donation.order)
            if lp and lp.payment_method in payment_method_mapping:
                sfdonation.payment_method = payment_method_mapping[lp.payment_method]
        # Save the object to Salesforce
        if not dry_run:
            try:
                sfdonation.save()
                success_count += 1
            except Exception as e:
                error_count += 1
                logger.error("Error while saving donation id {0}: ".format(donation.id) + str(e))
    return success_count, error_count
def sync_vouchers(dry_run, sync_from_datetime, loglevel):
    """Mirror Voucher records into Salesforce voucher records.

    Each Voucher (optionally limited to those updated since
    ``sync_from_datetime``) is matched to a SalesforceVoucher by
    ``external_id_voucher``; a new record is created when none exists.

    :param dry_run: when True, map the data but skip the Salesforce save.
    :param sync_from_datetime: if set, only sync records updated since then.
    :param loglevel: logging level applied to the module logger.
    :returns: tuple ``(success_count, error_count)``.
    """
    logger.setLevel(loglevel)
    error_count = 0
    success_count = 0
    vouchers = Voucher.objects.all()
    if sync_from_datetime:
        vouchers = vouchers.filter(updated__gte=sync_from_datetime)
    logger.info("Syncing {0} Voucher objects.".format(vouchers.count()))
    for voucher in vouchers:
        logger.debug("Syncing Voucher: {0}".format(voucher.id))
        # Find the corresponding SF vouchers.
        try:
            sfvoucher = SalesforceVoucher.objects.get(external_id_voucher=voucher.id)
        except SalesforceVoucher.DoesNotExist:
            sfvoucher = SalesforceVoucher()
        except Exception as e:
            # Consistent with every other sync_* function: log an unexpected
            # lookup failure and stop the run instead of letting it propagate.
            logger.error("Error while loading sfvoucher id {0} - stopping: ".format(voucher.id) + str(e))
            return success_count, error_count+1
        # Initialize the Contact object that refers to the voucher purchaser;
        # a missing contact is logged but does not stop the sync.
        try:
            sfvoucher.purchaser = SalesforceContact.objects.get(external_id=voucher.sender_id)
        except SalesforceContact.DoesNotExist:
            logger.error("Unable to find purchaser contact id {0} in Salesforce for voucher id {1}".format(
                voucher.sender_id, voucher.id))
        # SF Layout: Donation Information section.
        # Amount is stored in cents; Salesforce receives a "x.yy" string.
        sfvoucher.amount = "%01.2f" % (float(voucher.amount) / 100)
        sfvoucher.close_date = voucher.created
        sfvoucher.description = voucher.message
        # sfvoucher.stage_name exists as state: "In progress", however this has been shifted to Donation?
        sfvoucher.stage_name = VoucherStatuses.values[voucher.status].title()
        #sfvoucher.payment_method = ""
        # Display name: purchaser's name when known, otherwise a placeholder.
        if sfvoucher.purchaser and sfvoucher.purchaser.last_name:
            if sfvoucher.purchaser.first_name:
                sfvoucher.name = sfvoucher.purchaser.first_name + " " + sfvoucher.purchaser.last_name
            else:
                sfvoucher.name = sfvoucher.purchaser.last_name
        else:
            sfvoucher.name = "1%MEMBER"
        # sfvoucher.name exist in production: "NOT YET USED 1%VOUCHER" when stage_name is "In progress"
        # sfvoucher.opportunity_type = ""
        # SF Other.
        sfvoucher.external_id_voucher = voucher.id
        # Fixed Salesforce record-type id for vouchers.
        sfvoucher.record_type = "012A0000000BxfHIAS"
        # Save the object to Salesforce
        if not dry_run:
            try:
                sfvoucher.save()
                success_count += 1
            except Exception as e:
                error_count += 1
                logger.error("Error while saving voucher id {0}: ".format(voucher.id) + str(e))
    return success_count, error_count
def sync_tasks(dry_run, sync_from_datetime, loglevel):
    """Mirror Task records into Salesforce task records.

    Each Task (optionally limited to those updated since
    ``sync_from_datetime``) is matched to a SalesforceTask by
    ``external_id``; a new record is created when none exists.

    Args:
        dry_run: when True, map the data but skip the Salesforce save.
        sync_from_datetime: if set, only sync records updated since then.
        loglevel: logging level applied to the module logger.

    Returns:
        Tuple ``(success_count, error_count)``.
    """
    logger.setLevel(loglevel)
    success_count, error_count = 0, 0

    tasks = Task.objects.all()
    if sync_from_datetime:
        tasks = tasks.filter(updated__gte=sync_from_datetime)

    logger.info("Syncing {0} Task objects.".format(tasks.count()))

    for task in tasks:
        logger.debug("Syncing Task: {0}".format(task.id))

        # Re-use the existing Salesforce record when there is one; any
        # unexpected lookup failure aborts the whole run.
        try:
            sf_task = SalesforceTask.objects.get(external_id=task.id)
        except SalesforceTask.DoesNotExist:
            sf_task = SalesforceTask()
        except Exception as e:
            logger.error("Error while loading sftask id {0} - stopping: ".format(task.id) + str(e))
            return success_count, error_count + 1

        sf_task.external_id = task.id

        # Cross-references are resolved best-effort: missing records are
        # logged but do not stop the sync.
        try:
            sf_task.project = SalesforceProject.objects.get(external_id=task.project.id)
        except SalesforceProject.DoesNotExist:
            logger.error("Unable to find project id {0} in Salesforce for task id {1}".format(task.project.id, task.id))
        try:
            sf_task.author = SalesforceContact.objects.get(external_id=task.author.id)
        except SalesforceContact.DoesNotExist:
            logger.error("Unable to find contact id {0} in Salesforce for task id {1}".format(task.author.id, task.id))

        sf_task.deadline = task.deadline or None
        sf_task.effort = task.time_needed
        sf_task.extended_task_description = task.description
        sf_task.location_of_the_task = task.location
        sf_task.people_needed = task.people_needed
        sf_task.end_goal = task.end_goal
        if task.skill:
            sf_task.task_expertise = task.skill.name.encode("utf-8")
        sf_task.task_status = task.status
        sf_task.title = task.title
        sf_task.task_created_date = task.created or None

        # Tags are flattened into one comma-separated string; each tag is
        # prepended, so the result lists tags in reverse iteration order.
        sf_task.tags = ""
        for tag in task.tags.all():
            sf_task.tags = str(tag) + ", " + sf_task.tags

        # Only a realized task with a recorded status change carries a
        # realization date.
        if task.status == 'realized' and task.date_status_change:
            sf_task.date_realized = task.date_status_change
        else:
            sf_task.date_realized = None

        if not dry_run:
            try:
                sf_task.save()
                success_count += 1
            except Exception as e:
                error_count += 1
                logger.error("Error while saving task id {0}: ".format(task.id) + str(e))

    return success_count, error_count
def sync_taskmembers(dry_run, sync_from_datetime, loglevel):
logger.setLevel(loglevel)
error_count = 0
success_count = 0
task_members = TaskMember.objects.all()
if sync_from_datetime:
task_members = task_members.filter(updated__gte=sync_from_datetime)
logger.info("Syncing {0} TaskMember objects.".format(task_members.count()))
for task_member in task_members:
logger.debug("Syncing TaskMember: {0}".format(task_member.id))
# Find the corresponding SF task members.
try:
sftaskmember = SalesforceTaskMembers.objects.get(external_id=task_member.id)
except SalesforceTaskMembers.DoesNotExist:
sftaskmember = SalesforceTaskMembers()
except Exception as e:
logger.error("Error while loading sftaskmember id {0} - stopping: ".format(task_member.id) + str(e))
return success_count, error_count+1
# Populate the data
sftaskmember.external_id = task_member.id
try:
sftaskmember.contacts = SalesforceContact.objects.get(external_id=task_member.member.id)
except SalesforceContact.DoesNotExist:
logger.error("Unable to find contact id {0} in Salesforce for task member id {1}".format(
task_member.member.id, task_member.id))
try:
sftaskmember.x1_club_task = SalesforceTask.objects.get(external_id=task_member.task.id)
except SalesforceTask.DoesNotExist:
logger.error("Unable to find task id {0} in Salesforce for task member id {1}".format(task_member.member.id,
task_member.id))
sftaskmember.motivation = task_member.motivation
sftaskmember.status = TaskMember.TaskMemberStatuses.values[task_member.status].title()
sftaskmember.taskmember_created_date = task_member.created
# Save the object to Salesforce
if not dry_run:
try:
sftaskmember.save()
success_count += 1
except Exception as e:
error_count += 1
logger.error("Error while saving task member id {0}: ".format(task_member.id) + str(e))
return success_count, error_count
def sync_organizationmembers(dry_run, sync_from_datetime, loglevel):
logger.setLevel(loglevel)
error_count = 0
success_count = 0
org_members = OrganizationMember.objects.all()
if sync_from_datetime:
org_members = org_members.filter(updated__gte=sync_from_datetime)
logger.info("Syncing {0} OrganizationMember objects.".format(org_members.count()))
for org_member in org_members:
logger.debug("Syncing OrganizationMember: {0}".format(org_member.id))
# Find the corresponding SF organization members.
try:
sf_org_member = SalesforceOrganizationMember.objects.get(external_id=org_member.id)
except SalesforceOrganizationMember.DoesNotExist:
sf_org_member = SalesforceOrganizationMember()
logger.debug("Creating new SalesforceOrganizationMember")
except Exception as e:
logger.error("Error while loading sf_org_member id {0} - stopping: ".format(org_member.id) + str(e))
return success_count, error_count+1
# Populate the data
sf_org_member.external_id = org_member.id
try:
sf_org_member.contact = SalesforceContact.objects.get(external_id=org_member.user.id)
# logger.debug("Connecting contact id {0} in Salesforce".format(org_member.user.id))
except SalesforceContact.DoesNotExist:
logger.error("Unable to find contact id {0} in Salesforce for organization member id {1}".format(
org_member.user.id, org_member.id))
try:
sf_org_member.organization = SalesforceOrganization.objects.get(external_id=org_member.organization.id)
# logger.debug("Connecting organization id {0} in Salesforce".format(org_member.organization.id))
except SalesforceOrganization.DoesNotExist:
logger.error("Unable to find organization id {0} in Salesforce for organization member id {1}".format(
org_member.organization.id, org_member.id))
sf_org_member.role = org_member.function
# Save the object to Salesforce
if not dry_run:
try:
sf_org_member.save()
success_count += 1
except Exception as e:
error_count += 1
logger.error("Error while saving organization member id {0}: {1}".format(org_member.id, e))
return success_count, error_count
def send_log(filename, err, succ, command, command_ext, dry_run, loglevel):
logger.setLevel(loglevel)
sflog = SalesforceLogItem()
logger.info("Sending log to Salesforce...")
sflog.entered = timezone.localtime(timezone.now())
sflog.source = str(command)
sflog.source_extended = str(command_ext)
sflog.errors = err
sflog.successes = succ
with open(filename, "r") as logfile:
for line in logfile:
if len(line) > 1300:
sflog.message += line[:1300]
else:
sflog.message += line
# Save the object to Salesforce
if not dry_run:
try:
sflog.save()
except Exception as e:
logger.error("Error while saving log: " + str(e))
| bsd-3-clause | 644c2fd720ca6f942d5cd214d118d925 | 39.659705 | 120 | 0.637641 | 3.945756 | false | false | false | false |
onepercentclub/onepercentclub-site | apps/organizations/forms.py | 1 | 1111 | from django import forms
from django.contrib.admin.widgets import AdminFileWidget
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext as _
from django.utils.encoding import force_text
from django.utils.html import conditional_escape, format_html
from django.utils.safestring import mark_safe
from .models import Organization, OrganizationDocument
# Widgets
class UploadWidget(forms.FileInput):
def render(self, name, value, attrs=None):
html = super(UploadWidget, self).render(name, value, attrs)
if value:
text = _('Change:')
else:
text = _('Add:')
html = format_html(
'<p class="url">{0} {1}</p>',
text, html
)
return html
# Forms
class OrganizationDocumentForm(forms.ModelForm):
class Meta:
model = OrganizationDocument
widgets = {
'file': UploadWidget()
}
def __init__(self, *args, **kwargs):
super(OrganizationDocumentForm, self).__init__(*args, **kwargs)
self.fields['file'].required = False
| bsd-3-clause | bad99b69d4fc94e78fcba66fc8447285 | 29.027027 | 71 | 0.643564 | 4.306202 | false | false | false | false |
onepercentclub/onepercentclub-site | apps/recurring_donations/tests/test_api.py | 1 | 4512 | from bluebottle.bb_projects.models import ProjectPhase
from bluebottle.geo.models import Country
from bluebottle.test.factory_models.accounts import BlueBottleUserFactory
from bluebottle.test.factory_models.geo import CountryFactory
from django.core.urlresolvers import reverse
from onepercentclub.tests.factory_models.project_factories import OnePercentProjectFactory
from onepercentclub.tests.utils import OnePercentTestCase
from rest_framework import status
class MonthlyDonationApiTest(OnePercentTestCase):
def setUp(self):
self.init_projects()
self.phase_campaign = ProjectPhase.objects.get(slug='campaign')
self.country = CountryFactory()
self.some_project = OnePercentProjectFactory.create(amount_asked=500, status=self.phase_campaign)
self.another_project = OnePercentProjectFactory.create(amount_asked=750, status=self.phase_campaign)
self.some_user = BlueBottleUserFactory.create()
self.some_user_token = "JWT {0}".format(self.some_user.get_jwt_token())
self.another_user = BlueBottleUserFactory.create()
self.another_user_token = "JWT {0}".format(self.another_user.get_jwt_token())
self.monthly_donation_url = reverse('monthly-donation-list')
self.monthly_donation_project_url = reverse('monthly-donation-project-list')
self.monthly_profile = {'iban': 'NL13TEST0123456789',
'bic': 'TESTNL2A',
'name': 'Nijntje het Konijntje',
'city': 'Amsterdam',
'country': self.country.id,
'amount': 50}
def test_create_monthly_donation(self):
"""
Tests for creating, retrieving, updating monthly donation.
"""
# Check that user has no monthly donation
response = self.client.get(self.monthly_donation_url, HTTP_AUTHORIZATION=self.some_user_token)
self.assertEqual(response.status_code, status.HTTP_200_OK, response.data)
self.assertEqual(response.data['count'], 0)
self.assertEqual(response.data['results'], [])
# Create a new monthly donation
response = self.client.post(self.monthly_donation_url, self.monthly_profile, HTTP_AUTHORIZATION=self.some_user_token)
self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data)
self.assertEqual(response.data['amount'], self.monthly_profile['amount'])
self.assertEqual(response.data['active'], True)
some_monthly_donation_id = response.data['id']
# Reload it and check that all is still well.
response = self.client.get(self.monthly_donation_url, HTTP_AUTHORIZATION=self.some_user_token)
self.assertEqual(response.status_code, status.HTTP_200_OK, response.data)
self.assertEqual(response.data['count'], 1)
self.assertEqual(response.data['results'][0]['amount'], self.monthly_profile['amount'])
# Add a preferred projects
monthly_project = {
'donation': some_monthly_donation_id,
'project': self.some_project.slug
}
response = self.client.post(self.monthly_donation_project_url, monthly_project, HTTP_AUTHORIZATION=self.some_user_token)
self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data)
# Reload it. It should have that project embedded
response = self.client.get(self.monthly_donation_url, HTTP_AUTHORIZATION=self.some_user_token)
self.assertEqual(response.status_code, status.HTTP_200_OK, response.data)
self.assertEqual(len(response.data['results'][0]['projects']), 1)
self.assertEqual(response.data['results'][0]['projects'][0]['project'], self.some_project.slug)
# Another should not have a monthly donation
response = self.client.get(self.monthly_donation_url, HTTP_AUTHORIZATION=self.another_user_token)
self.assertEqual(response.status_code, status.HTTP_200_OK, response.data)
self.assertEqual(response.data['count'], 0)
# Another user can't add a project to first monthly donation
monthly_project = {
'donation': some_monthly_donation_id,
'project': self.another_project.slug
}
response = self.client.post(self.monthly_donation_project_url, monthly_project, HTTP_AUTHORIZATION=self.another_user_token)
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN, response.data)
| bsd-3-clause | 600472e4c7ee571cc630b236154619e3 | 51.465116 | 131 | 0.682846 | 3.893011 | false | true | false | false |
jamesoff/simplemonitor | simplemonitor/Alerters/nc.py | 1 | 1456 | """
SimpleMonitor alerts via macOS Notification Center
"""
import platform
try:
import pync
PYNC_AVAILABLE = True
except ImportError:
PYNC_AVAILABLE = False
from ..Monitors.monitor import Monitor
from .alerter import Alerter, AlertLength, AlertType, register
@register
class NotificationCenterAlerter(Alerter):
"""Send alerts to the macOS Notification Center"""
alerter_type = "nc"
def __init__(self, config_options: dict) -> None:
super().__init__(config_options)
if platform.system() != "Darwin" or not PYNC_AVAILABLE:
self.alerter_logger.critical(
"This alerter (currently) only works on Mac OS X!"
)
return
def send_alert(self, name: str, monitor: Monitor) -> None:
"""Send the message"""
if not PYNC_AVAILABLE:
self.alerter_logger.critical("Missing pync package")
return
alert_type = self.should_alert(monitor)
message = ""
if alert_type not in [AlertType.FAILURE, AlertType.CATCHUP]:
return
message = self.build_message(AlertLength.NOTIFICATION, alert_type, monitor)
if not self._dry_run:
pync.notify(message=message, title="SimpleMonitor")
else:
self.alerter_logger.info("dry_run: would send message: %s", message)
def _describe_action(self) -> str:
return "sending notifications via Notification Center"
| bsd-3-clause | 209fba4c7b4ffdfe9b9d891b4d52144b | 28.12 | 83 | 0.635302 | 4.044444 | false | false | false | false |
jamesoff/simplemonitor | simplemonitor/Monitors/file.py | 1 | 1619 | """
File-based monitors for SimpleMonitor
"""
import os
import os.path
import time
from .monitor import Monitor, register
@register
class MonitorBackup(Monitor):
"""
Monitor Veritas BackupExec
May be out of date
"""
monitor_type = "backup"
filename = os.path.join(
"C:\\", "Program Files", "VERITAS", "Backup Exec", "status.txt"
)
def run_test(self) -> bool:
if not os.path.exists(self.filename):
return self.record_fail("Status file missing")
try:
fh = open(self.filename, "r")
except Exception:
return self.record_fail("Unable to open status file")
try:
status = fh.readline()
_timestamp = fh.readline()
except Exception:
return self.record_fail("Unable to read data from status file")
fh.close()
status = status.strip()
timestamp = int(_timestamp.strip())
if status not in ("ok", "running"):
return self.record_fail("Unknown status %s" % status)
now = int(time.time())
if timestamp > now:
return self.record_fail("Timestamp is ahead of now!")
gap = now - timestamp
if status == "ok":
if gap > (3600 * 24):
return self.record_fail("OK was reported %ds ago" % gap)
else:
if gap > (3600 * 7):
return self.record_fail("Backup has been running for %ds" % gap)
return self.record_success()
def describe(self) -> str:
"Checking Backup Exec runs daily, and doesn't run for too long."
| bsd-3-clause | 9fd82d197e3ce1ed683771c6a4b8805c | 24.698413 | 80 | 0.565781 | 4.119593 | false | false | false | false |
jamesoff/simplemonitor | simplemonitor/Loggers/seq.py | 1 | 3076 | """
Simplemonitor logger for seq
Inspiration from
https://raw.githubusercontent.com/eifinger/appdaemon-scripts/master/seqSink/seqSink.py
"""
import datetime
import json
from typing import cast
import requests
from ..Monitors.monitor import Monitor
from .logger import Logger, register
@register
class SeqLogger(Logger):
"""Logging to seq"""
logger_type = "seq"
only_failures = False
buffered = False
dateformat = None
def __init__(self, config_options: dict = None) -> None:
if config_options is None:
config_options = {}
super().__init__(config_options)
# i.e. http://192.168.0.5:5341
self.endpoint = cast(
str, self.get_config_option("endpoint", required=True, allow_empty=False)
)
# Potentially, would need to add a header for ApiKey
# Send message to indicate we have started logging
self.log_to_seq(
self.endpoint,
"SeqLogger",
"simpleMonitor",
"__init__",
None,
"logging enabled for simpleMonitor",
False,
)
def save_result2(self, name: str, monitor: Monitor) -> None:
try:
is_fail = monitor.test_success() is False
self.log_to_seq(
self.endpoint,
name,
monitor.name,
monitor.monitor_type,
str(monitor.get_params()),
monitor.describe(),
is_fail,
)
except Exception:
self.logger_logger.exception("Error sending to seq in %s", monitor.name)
def describe(self) -> str:
return "Sends simple log to seq using raw endpoint"
def log_to_seq(
self, endpoint, name, app_name, monitor_type, params, description, is_fail
):
"""Send an event to seq"""
event_data = {
"Timestamp": str(datetime.datetime.now()),
"Level": "Error" if is_fail is True else "Information",
"MessageTemplate": str(description),
"Properties": {
"Type": "simpleMonitor",
"Name": name,
"Monitor": str(app_name),
"MonitorType": monitor_type,
# "Params": params
},
}
if params is not None:
event_data["Properties"]["Params"] = params
request_body = {"Events": [event_data]}
try:
_ = json.dumps(request_body) # This just checks it is valid...
except TypeError:
self.logger_logger.error("Could not serialise %s", request_body)
return
try:
response = requests.post(self.endpoint, json=request_body)
if not response.status_code == 200 and not response.status_code == 201:
self.logger_logger.error(
"POST to seq failed with status code: %s", response
)
except requests.RequestException:
self.logger_logger.exception("Failed to log to seq")
| bsd-3-clause | ca8ec1a422deb10c9ef21777cfe0c3d3 | 29.156863 | 86 | 0.54974 | 4.344633 | false | false | false | false |
jamesoff/simplemonitor | simplemonitor/util/envconfig.py | 1 | 2768 | """A version of ConfigParser which supports subsitutions from environment variables."""
import os
import re
from configparser import BasicInterpolation, ConfigParser
from typing import Any, List, Optional
class EnvironmentAwareConfigParser(ConfigParser):
"""A subclass of ConfigParser which allows %env:VAR% interpolation via the
get method."""
r = re.compile("%env:([a-zA-Z0-9_]+)%")
def __init__(self, *args: Any, **kwargs: Any) -> None:
"""Init with our specific interpolation class (for Python 3)"""
interpolation = EnvironmentAwareInterpolation()
kwargs["interpolation"] = interpolation
ConfigParser.__init__(self, *args, **kwargs)
def read(self, filenames: Any, encoding: Optional[str] = None) -> List[str]:
"""Load a config file and do environment variable interpolation on the section names."""
result = ConfigParser.read(self, filenames)
for section in self.sections():
original_section = section
matches = self.r.search(section)
while matches:
env_key = matches.group(1)
if env_key in os.environ:
section = section.replace(matches.group(0), os.environ[env_key])
else:
raise ValueError(
"Cannot find {0} in environment for config interpolation".format(
env_key
)
)
matches = self.r.search(section)
if section != original_section:
self.add_section(section)
for (option, value) in self.items(original_section):
self.set(section, option, value)
self.remove_section(original_section)
return result
class EnvironmentAwareInterpolation(BasicInterpolation):
"""An interpolation which substitutes values from the environment."""
r = re.compile("%env:([a-zA-Z0-9_]+)%")
def before_get(
self, parser: Any, section: str, option: str, value: Any, defaults: Any
) -> Any:
parser.get(section, option, raw=True, fallback=value)
matches = self.r.search(value)
old_value = value
while matches:
env_key = matches.group(1)
if env_key in os.environ:
value = value.replace(matches.group(0), os.environ[env_key])
else:
raise ValueError(
"Cannot find {0} in environment for config interpolation".format(
env_key
)
)
matches = self.r.search(value)
if value == old_value:
break
old_value = value
return value
| bsd-3-clause | ae732f2c01baa6277fa8242ed5236462 | 37.444444 | 96 | 0.569364 | 4.707483 | false | true | false | false |
dieseldev/diesel | tests/unit/test_pipeline.py | 1 | 3297 | #12345678
# That comment above matters (used in the test!)
from diesel.pipeline import Pipeline, PipelineClosed, PipelineCloseRequest
from cStringIO import StringIO
FILE = __file__
if FILE.endswith('.pyc') or FILE.endswith('.pyo'):
FILE = FILE[:-1]
def test_add_string():
p = Pipeline()
assert (p.add("foo") == None)
assert (not p.empty)
def test_add_file():
p = Pipeline()
assert (p.add(open(FILE)) == None)
assert (not p.empty)
def test_add_filelike():
p = Pipeline()
sio = StringIO()
assert (p.add(sio) == None)
assert (not p.empty)
def test_add_badtypes():
p = Pipeline()
class Whatever(object): pass
for item in [3, [], Whatever()]:
try:
p.add(item)
except ValueError:
pass
assert (p.empty)
def test_read_empty():
p = Pipeline()
assert (p.read(500) == '')
def test_read_string():
p = Pipeline()
p.add("foo")
assert (p.read(3) == "foo")
assert (p.empty)
def test_read_file():
p = Pipeline()
p.add(open(FILE))
assert (p.read(5) == "#1234")
def test_read_filelike():
p = Pipeline()
p.add(StringIO('abcdef'))
assert (p.read(5) == 'abcde')
def test_read_twice():
p = Pipeline()
p.add("foo")
assert (p.read(2) == "fo")
assert (p.read(2) == "o")
def test_read_twice_empty():
p = Pipeline()
p.add("foo")
assert (p.read(2) == "fo")
assert (p.read(2) == "o")
assert (p.read(2) == "")
def test_read_backup():
p = Pipeline()
p.add("foo")
assert (p.read(2) == "fo")
p.backup("fo")
assert (p.read(2) == "fo")
assert (p.read(2) == "o")
def test_read_backup_extra():
p = Pipeline()
p.add("foo")
assert (p.read(2) == "fo")
p.backup("foobar")
assert (p.read(500) == "foobaro")
def test_read_hybrid_objects():
p = Pipeline()
p.add("foo,")
p.add(StringIO("bar,"))
p.add(open(FILE))
assert (p.read(10) == "foo,bar,#1")
assert (p.read(4) == "2345")
p.backup("rock") # in the middle of the "file"
assert (p.read(6) == "rock67")
def test_close():
p = Pipeline()
p.add("foo")
p.add(StringIO("bar"))
p.close_request()
assert (p.read(1000) == "foobar")
try:
p.read(1000)
except PipelineCloseRequest:
pass
def test_long_1():
p = Pipeline()
p.add("foo")
assert (p.read(2) == "fo")
p.add("bar")
assert (p.read(3) == "oba")
p.backup("rocko")
p.add(StringIO("soma"))
assert (p.read(1000) == "rockorsoma")
assert (p.read(1000) == "")
assert (p.empty)
p.add("X" * 10000)
p.close_request()
assert (p.read(5000) == 'X' * 5000)
p.backup('XXX')
try:
p.add("newstuff")
except PipelineClosed:
pass
assert (not p.empty)
assert (p.read(100000) == 'X' * 5003)
assert (not p.empty)
try:
p.read(1000)
except PipelineCloseRequest:
pass
assert (not p.empty)
try:
p.read(1000)
except PipelineCloseRequest:
pass
def test_pri_clean():
p = Pipeline()
p.add("two")
p.add("three")
p.add("one")
assert (p.read(18) == "twothreeone")
p.add("two", 2)
p.add("three", 3)
p.add("six", 2)
p.add("one", 1)
assert (p.read(18) == "threetwosixone")
| bsd-3-clause | d5031886b9134162510f05210359feaa | 20.98 | 74 | 0.544131 | 2.94375 | false | true | false | false |
dieseldev/diesel | examples/udp_echo.py | 1 | 1225 | # vim:ts=4:sw=4:expandtab
'''Simple udp echo server and client.
'''
import sys
from diesel import (
UDPService, UDPClient, call, send, datagram, quickstart, receive,
)
class EchoClient(UDPClient):
"""A UDPClient example.
Very much like a normal Client but it can only receive datagrams
from the wire.
"""
@call
def say(self, msg):
send(msg)
return receive(datagram)
def echo_server():
"""The UDPService callback.
Unlike a standard Service callback that represents a connection and takes
the remote addr as the first function, a UDPService callback takes no
arguments. It is responsible for receiving datagrams from the wire and
acting upon them.
"""
while True:
data = receive(datagram)
send("you said %s" % data)
def echo_client():
client = EchoClient('localhost', 8013)
while True:
msg = raw_input("> ")
print client.say(msg)
if len(sys.argv) == 2:
if 'client' in sys.argv[1]:
quickstart(echo_client)
raise SystemExit
elif 'server' in sys.argv[1]:
quickstart(UDPService(echo_server, 8013))
raise SystemExit
print 'usage: python %s (server|client)' % sys.argv[0]
| bsd-3-clause | 710269079a1aa5de581b4a9c3a924c38 | 24.520833 | 77 | 0.64898 | 3.816199 | false | false | false | false |
dieseldev/diesel | examples/convoy.py | 1 | 1416 | from diesel.convoy import convoy, ConvoyRole
import kv_palm
convoy.register(kv_palm)
from kv_palm import ( GetRequest, GetOkay, GetMissing,
SetRequest, SetOkay )
class KvNode(ConvoyRole):
limit = 1
def __init__(self):
self.values = {}
ConvoyRole.__init__(self)
def handle_GetRequest(self, sender, request):
if request.key in self.values:
sender.respond(GetOkay(value=self.values[request.key]))
else:
sender.respond(GetMissing())
def handle_SetRequest(self, sender, request):
self.values[request.key] = request.value
sender.respond(SetOkay())
def run_sets():
print "I am here!"
convoy.send(SetRequest(key="foo", value="bar"))
print "I am here 2!"
convoy.send(SetRequest(key="foo", value="bar"))
print "I am here 3!"
print convoy.rpc(GetRequest(key="foo")).single
print "I am here 4!"
import time
t = time.time()
for x in xrange(5000):
convoy.send(SetRequest(key="foo", value="bar"))
r = convoy.rpc(GetRequest(key="foo")).single
print 5000.0 / (time.time() - t), "/ s"
print ''
print r
if __name__ == '__main__':
convoy.run_with_nameserver("localhost:11111", ["localhost:11111"], KvNode(), run_sets)
#import cProfile
#cProfile.run('convoy.run_with_nameserver("localhost:11111", ["localhost:11111"], KvNode(), run_sets)')
| bsd-3-clause | f31a8506b9912d207e61236584b50ebd | 30.466667 | 107 | 0.618644 | 3.232877 | false | false | false | false |
dieseldev/diesel | diesel/convoy/__init__.py | 1 | 12398 | from collections import defaultdict
from uuid import uuid4
from time import time
from random import choice
import operator as op
from palm.palm import ProtoBase
from functools import partial
from diesel import quickstart, Thunk, sleep, log, fork
from diesel.util.queue import Queue, first
from diesel.logmod import LOGLVL_DEBUG
from .convoy_env_palm import MessageResponse, MessageEnvelope
from .consensus.server import run_server as run_consensus_server
from .consensus.client import (ConvoyNameService, ConsensusSet,
ConvoySetFailed, ConvoySetTimeout,
ConvoyWaitDone)
from .messagenet import (me, ConvoyService,
MESSAGE_RES, MESSAGE_OUT,
host_loop)
class ConvoyRemoteException(object):
def __init__(self, s):
self.exc_desc = s
class ConvoyRemoteNull(object):
pass
class ConvoyRemoteResult(object):
def __init__(self, i):
self.i = i
@property
def single(self):
return self.i[0]
def __iter__(self):
return self.i
def __len__(self):
return len(self.i)
class ConvoyRemoteError(Exception): pass
class ConvoyTimeoutError(Exception): pass
class Convoy(object):
def __init__(self):
self.routes = defaultdict(set) # message name to host
self.local_handlers = {}
self.enabled_handlers = {}
self.classes = {}
self.host_queues = {}
self.run_nameserver = None
self.role_messages = defaultdict(list)
self.roles = set()
self.roles_wanted = set()
self.roles_owned = set()
self.role_clocks = {}
self.role_by_name = {}
self.incoming = Queue()
self.pending = {}
self.rpc_waits = {}
self.table_changes = Queue()
def run_with_nameserver(self, myns, nameservers, *objs):
self.run_nameserver = myns
self.run(nameservers, *objs)
def run(self, nameservers, *objs):
nameservers = [(h, int(p))
for h, p in (i.split(':')
for i in nameservers)]
runem = []
if self.run_nameserver:
runem.append(
Thunk(lambda: run_consensus_server(self.run_nameserver, nameservers)))
runem.append(self)
handler_functions = dict((v, k) for k, v in self.local_handlers.iteritems())
final_o = []
for o in objs:
if type(o.__class__) is ConvoyRegistrar:
r = o.__class__
self.roles_wanted.add(r)
for m in self.role_messages[r]:
assert m not in self.local_handlers, \
"cannot add two instances for same role/message"
self.local_handlers[m] = \
getattr(o, 'handle_' + m)
else:
final_o.append(o)
self.ns = ConvoyNameService(nameservers)
runem.append(self.ns)
runem.append(self.deliver)
runem.extend(final_o)
runem.append(ConvoyService())
quickstart(*runem)
def __call__(self):
assert me.id
should_process = self.roles
rlog = log.sublog("convoy-resolver", LOGLVL_DEBUG)
while True:
for r in should_process:
if r in self.roles_wanted:
resp = self.ns.add(r.name(), me.id, r.limit)
ans = None
if type(resp) == ConsensusSet:
self.roles_owned.add(r)
ans = resp
else:
if r in self.roles_owned:
self.roles_owned.remove(r)
if resp.set:
ans = resp.set
else:
ans = self.ns.lookup(r.name())
if ans:
self.role_clocks[r.name()] = ans.clock
for m in self.role_messages[r]:
self.routes[m] = ans.members
if should_process:
self.log_resolution_table(rlog, should_process)
self.table_changes.put(None)
wait_result = self.ns.wait(5, self.role_clocks)
if type(wait_result) == ConvoyWaitDone:
should_process = set([self.role_by_name[wait_result.key]])
else:
should_process = set()
self.ns.alive()
def log_resolution_table(self, rlog, processed):
rlog.debug("======== diesel/convoy routing table updates ========")
rlog.debug(" ")
for p in processed:
rlog.debug(" %s [%s]" %
(p.name(),
', '.join(self.role_messages[p])))
if self.role_messages:
hosts = self.routes[self.role_messages[p][0]]
for h in hosts:
rlog.debug(" %s %s" % (
'*' if h == me.id else '-',
h))
def register(self, mod):
for name in dir(mod):
v = getattr(mod, name)
if type(v) is type and issubclass(v, ProtoBase):
self.classes[v.__name__] = v
def add_target_role(self, o):
self.roles.add(o)
self.role_by_name[o.name()] = o
for k, v in o.__dict__.iteritems():
if k.startswith("handle_") and callable(v):
handler_for = k.split("_", 1)[-1]
assert handler_for in self.classes, "protobuf class not recognized; register() the module"
self.role_messages[o].append(handler_for)
def host_specific_send(self, host, msg, typ, transport_cb):
if host not in self.host_queues:
q = Queue()
fork(host_loop, host, q)
self.host_queues[host] = q
self.host_queues[host].put((msg, typ, transport_cb))
def local_dispatch(self, env):
if env.type not in self.classes:
self.host_specific_send(env.node_id,
MessageResponse(in_response_to=env.req_id,
result=MessageResponse.REFUSED,
error_message="cannot handle type"),
MESSAGE_RES, None)
elif me.id not in self.routes[env.type]:
# use routes, balance, etc
self.host_specific_send(env.node_id,
MessageResponse(in_response_to=env.req_id,
delivered=MessageResponse.REFUSED,
error_message="do not own route"),
MESSAGE_RES, None)
else:
inst = self.classes[env.type](env.body)
r = self.local_handlers[env.type]
sender = ConvoySender(env)
back = MessageResponse(in_response_to=env.req_id,
delivered=MessageResponse.ACCEPTED)
self.host_specific_send(env.node_id, back,
MESSAGE_RES, None)
try:
r(sender, inst)
except Exception, e:
s = str(e)
back.result = MessageResponse.EXCEPTION
back.error_message = s
raise
else:
if sender.responses:
back.result = MessageResponse.RESULT
back.responses.extend(sender.responses)
else:
back.result = MessageResponse.NULL
if env.wants_result:
back.delivered = MessageResponse.FINISHED
self.host_specific_send(env.node_id, back,
MESSAGE_RES, None)
def local_response(self, result):
id = result.in_response_to
if result.delivered == MessageResponse.REFUSED:
self.retry(id)
elif result.delivered == MessageResponse.ACCEPTED:
if id in self.pending:
del self.pending[id]
elif result.delivered == MessageResponse.FINISHED:
if id in self.rpc_waits:
q = self.rpc_waits.pop(id)
if result.result == MessageResponse.EXCEPTION:
resp = ConvoyRemoteException(result.error_message)
elif result.result == MessageResponse.NULL:
resp = ConvoyRemoteNull()
elif result.result == MessageResponse.RESULT:
res = [self.classes[m.type](m.body)
for m in result.responses]
resp = ConvoyRemoteResult(res)
else:
assert 0
q.put(resp)
def send(self, m, timeout=10):
self.incoming.put(Delivery(m, timeout))
def broadcast(self, m):
pass
def rpc(self, m, timeout=10):
q = Queue()
self.incoming.put(Delivery(m, timeout, rqueue=q))
ev, res = first(sleep=timeout, waits=[q])
if ev == q:
if res == ConvoyRemoteException:
raise ConvoyRemoteError(res.exc_desc)
if res == ConvoyRemoteNull:
return None
return res
else:
raise ConvoyTimeoutError("No response from a " +
("consensus remote within %ss timeout period" % timeout))
def retry(self, id):
if id in self.pending:
next = self.pending.pop(id)
self.incoming.put(next)
def deliver(self):
deferred = []
srg = self.routes.get
empty = set()
sorter = op.attrgetter("reschedule_at")
while True:
wait = (1.0 if not deferred else
deferred[-1].remaining)
r, next = first(waits=[self.incoming,
self.table_changes], sleep=wait)
if r == self.incoming:
if next.rqueue:
self.rpc_waits[next.id] = next.rqueue
hosts = srg(next.target, empty)
potentials = hosts - next.hosts_tried
if not potentials:
next.reschedule()
deferred.append(next)
else:
host = choice(list(potentials))
next.hosts_tried.add(host)
self.pending[next.id] = next
self.host_specific_send(host, next.env,
MESSAGE_OUT,
partial(self.retry, next.id))
deferred.sort(key=sorter, reverse=True)
t = time()
while deferred and deferred[-1].due(t):
i = deferred.pop()
if not i.expired(t):
self.incoming.put(i)
class Delivery(object):
def __init__(self, m, timeout, rqueue=None, broadcast=False):
self.id = str(uuid4())
self.target = m.__class__.__name__
self.timeout = time() + timeout
self.rqueue = rqueue
self.hosts_tried = set()
self.reschedule_at = None
self.reschedule_interval = 0.2
self.m = m
self.broadcast = broadcast
self.env = MessageEnvelope(
body=m.dumps(),
type=self.target,
req_id=self.id,
node_id=me.id,
wants_result=bool(rqueue))
def due(self, t):
return t >= self.reschedule_at
def reschedule(self):
self.reschedule_at = min(
time() + self.reschedule_interval,
self.timeout)
self.reschedule_interval *= 2
self.hosts_tried = set()
@property
def remaining(self):
return max(0, self.reschedule_at
- time())
def expired(self, t):
return t >= self.timeout
class ConvoySender(object):
def __init__(self, env):
self.from_host = env.node_id
self.type = env.type
self.req_id = env.req_id
self.responses = []
def respond(self, m):
env = MessageEnvelope(body=m.dumps(),
type=m.__class__.__name__,
req_id='',
node_id='',
wants_result=False)
self.responses.append(env)
convoy = Convoy()
class ConvoyRegistrar(type):
def __new__(*args):
t = type.__new__(*args)
if t.__name__ != 'ConvoyRole':
convoy.add_target_role(t)
return t
class ConvoyRole(object):
__metaclass__ = ConvoyRegistrar
limit = 0
@classmethod
def name(cls):
return cls.__name__
| bsd-3-clause | 12d76ca5daa8c27daf34d9b3e571af9c | 33.34349 | 106 | 0.517503 | 4.043705 | false | false | false | false |
topazproject/topaz | topaz/objects/fiberobject.py | 1 | 6701 | import copy
from rpython.rlib import jit
from rpython.rlib.rstacklet import StackletThread
from topaz.interpreter import RaiseReturn, RaiseBreak
from topaz.module import ClassDef
from topaz.objects.objectobject import W_Object
class State(object):
def __init__(self, space):
self.current = None
def get_current(self, space):
return self.current or space.getexecutioncontext().getmainfiber(space)
class W_FiberObject(W_Object):
"""
Fibers have a number of possible states:
* Has not yet begun execution: self.sthread is None
* Currently execution: self.sthread is not None and self is State.get_current()
* Suspended execution: self.sthread is not None and self.parent_fiber is None
* Suspended execution in the stack of fibers: self.sthread is not None and (self.parent_fiber is None or self is space.w_main_fiber)
* Dead: self.sthread is not None and self.sthread.is_empty_handle(self.h)
"""
classdef = ClassDef("Fiber", W_Object.classdef)
def __init__(self, space, klass=None):
W_Object.__init__(self, space, klass)
self.w_block = None
self.sthread = None
self.parent_fiber = None
def __deepcopy__(self, memo):
obj = super(W_FiberObject, self).__deepcopy__(memo)
obj.w_block = copy.deepcopy(self.w_block, memo)
obj.sthread = copy.deepcopy(self.sthread, memo)
obj.parent_fiber = copy.deepcopy(self.parent_fiber, memo)
return obj
@staticmethod
def build_main_fiber(space, ec):
w_fiber = W_FiberObject(space)
w_fiber.sthread = W_FiberObject.get_sthread(space, ec)
return w_fiber
@staticmethod
def get_sthread(space, ec):
sthread = ec.fiber_thread
if not sthread:
sthread = ec.fiber_thread = SThread(space.config, ec)
return sthread
@classdef.singleton_method("allocate")
def singleton_method_allocate(self, space):
return W_FiberObject(space, self)
@classdef.singleton_method("yield")
def singleton_method_yield(self, space, args_w):
current = space.fromcache(State).get_current(space)
parent_fiber = current.parent_fiber
if parent_fiber is None:
raise space.error(space.w_FiberError, "can't yield from root fiber")
space.fromcache(State).current = parent_fiber
topframeref = space.getexecutioncontext().topframeref
current.bottomframe.backref = jit.vref_None
if len(args_w) == 0:
global_state.w_result = space.w_nil
elif len(args_w) == 1:
global_state.w_result = args_w[0]
else:
global_state.w_result = space.newarray(args_w)
parent_fiber.h = space.getexecutioncontext().fiber_thread.switch(parent_fiber.h)
assert space.fromcache(State).current is current
current.bottomframe.backref = space.getexecutioncontext().topframeref
space.getexecutioncontext().topframeref = topframeref
return get_result()
@classdef.method("initialize")
@jit.unroll_safe
def method_initialize(self, space, block):
if block is None:
raise space.error(space.w_ArgumentError)
self.w_block = block
self.bottomframe = space.create_frame(
self.w_block.bytecode, w_self=self.w_block.w_self,
lexical_scope=self.w_block.lexical_scope, block=self.w_block.block,
parent_interp=self.w_block.parent_interp,
top_parent_interp=self.w_block.top_parent_interp,
regexp_match_cell=self.w_block.regexp_match_cell,
)
for idx, cell in enumerate(self.w_block.cells):
self.bottomframe.cells[len(self.w_block.bytecode.cellvars) + idx] = cell
@classdef.method("resume")
def method_resume(self, space, args_w):
if self.parent_fiber is not None:
raise space.error(space.w_FiberError, "double resume")
if self.sthread is not None and self.sthread.is_empty_handle(self.h):
raise space.error(space.w_FiberError, "dead fiber called")
self.parent_fiber = space.fromcache(State).get_current(space)
try:
global_state.space = space
global_state.space.fromcache(State).current = self
topframeref = space.getexecutioncontext().topframeref
if self.sthread is None:
self.bottomframe.handle_block_args(space, self.w_block.bytecode, args_w, self.w_block)
sthread = self.get_sthread(space, space.getexecutioncontext())
self.sthread = sthread
self.h = sthread.new(new_stacklet_callback)
else:
if len(args_w) == 1:
global_state.w_result = args_w[0]
else:
global_state.w_result = space.newarray(args_w)
self.h = self.sthread.switch(self.h)
assert space.fromcache(State).current is self.parent_fiber
space.getexecutioncontext().topframeref = topframeref
return get_result()
finally:
self.parent_fiber = None
class SThread(StackletThread):
def __init__(self, config, ec):
StackletThread.__init__(self, config)
self.config = config
self.ec = ec
def __deepcopy__(self, memo):
return SThread(self.config, copy.deepcopy(self.ec, memo))
class GlobalState(object):
def __init__(self):
self.clear()
def clear(self):
self.w_result = None
self.propagate_exception = None
self.space = None
# This makes me sad.
global_state = GlobalState()
def new_stacklet_callback(h, arg):
space = global_state.space
self = space.fromcache(State).current
origin = self.parent_fiber
origin.h = h
global_state.clear()
with self.sthread.ec.visit_frame(self.bottomframe):
try:
try:
global_state.w_result = space.execute_frame(self.bottomframe, self.w_block.bytecode)
except RaiseReturn:
raise space.error(space.w_LocalJumpError, "unexpected return")
except RaiseBreak:
raise space.error(space.w_LocalJumpError, "break from proc-closure")
except Exception as e:
global_state.propagate_exception = e
space.fromcache(State).current = self.parent_fiber
global_state.space = space
return origin.h
def get_result():
if global_state.propagate_exception:
e = global_state.propagate_exception
global_state.propagate_exception = None
raise e
else:
w_result = global_state.w_result
global_state.w_result = None
return w_result
| bsd-3-clause | 5a4e5e255ed8dd772b44da0f5bef94ba | 35.221622 | 136 | 0.635875 | 3.66977 | false | false | false | false |
topazproject/topaz | topaz/modules/ffi/ffi.py | 1 | 3964 | from __future__ import absolute_import
from topaz.error import RubyError
from topaz.module import ModuleDef
from topaz.modules.ffi.type import (POINTER, lltype_sizes, W_TypeObject,
type_names, W_BuiltinType)
from topaz.modules.ffi.function import W_FFIFunctionObject
from topaz.modules.ffi.function_type import W_FunctionTypeObject
from topaz.modules.ffi.variadic_invoker import W_VariadicInvokerObject
from topaz.modules.ffi.dynamic_library import W_DynamicLibraryObject
from topaz.modules.ffi.abstract_memory import W_AbstractMemoryObject
from topaz.modules.ffi.pointer import W_PointerObject
from topaz.modules.ffi.memory_pointer import W_MemoryPointerObject
from topaz.modules.ffi.data_converter import DataConverter
import platform
class FFI(object):
moduledef = ModuleDef("FFI")
@moduledef.setup_module
def setup_module(space, w_mod):
# setup modules from other files
w_Type = space.getclassfor(W_TypeObject)
space.set_const(w_mod, 'Type', w_Type)
space.set_const(w_Type, 'Builtin', space.getclassfor(W_BuiltinType))
space.set_const(w_mod, 'DynamicLibrary',
space.getclassfor(W_DynamicLibraryObject))
space.set_const(w_mod, 'Function',
space.getclassfor(W_FFIFunctionObject))
w_FunctionType = space.getclassfor(W_FunctionTypeObject)
space.set_const(w_mod, 'FunctionType', w_FunctionType)
space.set_const(w_mod, 'FunctionInfo', w_FunctionType)
space.set_const(w_mod, 'CallbackInfo', w_FunctionType)
space.set_const(w_mod, 'VariadicInvoker',
space.getclassfor(W_VariadicInvokerObject))
space.set_const(w_mod, 'AbstractMemory',
space.getclassfor(W_AbstractMemoryObject))
space.set_const(w_mod, 'Pointer',
space.getclassfor(W_PointerObject))
space.set_const(w_mod, 'MemoryPointer',
space.getclassfor(W_MemoryPointerObject))
space.set_const(w_mod, 'DataConverter',
space.getmoduleobject(DataConverter.moduledef))
w_native_type = space.newmodule('NativeType')
# This assumes that FFI::Type and the type constants already exist
for typename in type_names:
try:
w_ffi_type = space.find_const(w_Type, typename)
# setup type constants
space.set_const(w_mod, 'TYPE_' + typename, w_ffi_type)
# setup NativeType
space.set_const(w_native_type, typename, w_ffi_type)
except RubyError:
pass
space.set_const(w_mod, 'NativeType', w_native_type)
# setup Platform
w_platform = space.newmodule('Platform')
w_cpu = space.newstr_fromstr(platform.machine())
space.set_const(w_platform, 'CPU', w_cpu)
name_postfix = '_SIZE'
for name_prefix in ['INT8', 'INT16', 'INT32', 'INT64',
'LONG', 'FLOAT', 'DOUBLE']:
w_type = space.find_const(w_mod, 'Type')
w_tp = space.find_const(w_type, name_prefix)
space.set_const(w_platform, name_prefix + name_postfix,
space.send(w_tp, 'size'))
space.set_const(w_platform, 'ADDRESS_SIZE',
space.newint(
lltype_sizes[POINTER]))
space.set_const(w_mod, 'Platform', w_platform)
# setup StructLayout
w_struct_layout = space.newclass('StructLayout', None)
w_struct_layout_field = space.newclass('Field', None)
space.set_const(w_struct_layout, 'Field', w_struct_layout_field)
space.set_const(w_mod, 'StructLayout', w_struct_layout)
# setup StructByReference
w_struct_by_reference = space.newclass('StructByReference', None)
space.set_const(w_mod, 'StructByReference', w_struct_by_reference)
| bsd-3-clause | b56c2b333d84113234193e5887414383 | 46.190476 | 76 | 0.628406 | 3.684015 | false | false | false | false |
topazproject/topaz | tests/jit/test_instance_vars.py | 2 | 6469 | from .base import BaseJITTest
class TestInstanceVars(BaseJITTest):
def test_initialize(self, topaz, tmpdir):
traces = self.run(topaz, tmpdir, """
class A
def initialize
@a = 1
@b = 2
@c = 3
end
end
i = 0
while i < 10000
A.new
i += 1
end
""")
self.assert_matches(traces[0].loop, """
label(p0, p1, p2, p4, p6, p9, p10, i43, p19, p22, p24, descr=TargetToken(140691297408272))
debug_merge_point(0, 0, '<main> at LOAD_DEREF')
debug_merge_point(0, 0, '<main> at LOAD_CONST')
debug_merge_point(0, 0, '<main> at SEND')
setfield_gc(p24, 34, descr=<FieldS topaz.executioncontext.ExecutionContext.inst_last_instr 24>)
guard_not_invalidated(descr=<Guard0x7ff53edb8f90>)
p46 = force_token()
i48 = int_lt(i43, 10000)
guard_true(i48, descr=<Guard0x7ff53ecf9658>)
debug_merge_point(0, 0, '<main> at JUMP_IF_FALSE')
debug_merge_point(0, 0, '<main> at LOAD_SCOPE')
debug_merge_point(0, 0, '<main> at LOAD_LOCAL_CONSTANT')
debug_merge_point(0, 0, '<main> at SEND')
p49 = force_token()
p50 = force_token()
p51 = force_token()
enter_portal_frame(0, 0)
debug_merge_point(1, 1, 'initialize at LOAD_SELF')
debug_merge_point(1, 1, 'initialize at LOAD_CONST')
debug_merge_point(1, 1, 'initialize at STORE_INSTANCE_VAR')
debug_merge_point(1, 1, 'initialize at DISCARD_TOP')
debug_merge_point(1, 1, 'initialize at LOAD_SELF')
debug_merge_point(1, 1, 'initialize at LOAD_CONST')
debug_merge_point(1, 1, 'initialize at STORE_INSTANCE_VAR')
debug_merge_point(1, 1, 'initialize at DISCARD_TOP')
debug_merge_point(1, 1, 'initialize at LOAD_SELF')
debug_merge_point(1, 1, 'initialize at LOAD_CONST')
debug_merge_point(1, 1, 'initialize at STORE_INSTANCE_VAR')
debug_merge_point(1, 1, 'initialize at RETURN')
leave_portal_frame(0)
debug_merge_point(0, 0, '<main> at DISCARD_TOP')
debug_merge_point(0, 0, '<main> at LOAD_DEREF')
debug_merge_point(0, 0, '<main> at LOAD_CONST')
debug_merge_point(0, 0, '<main> at SEND')
p55 = force_token()
i57 = int_add(i43, 1)
debug_merge_point(0, 0, '<main> at STORE_DEREF')
debug_merge_point(0, 0, '<main> at DISCARD_TOP')
debug_merge_point(0, 0, '<main> at JUMP')
debug_merge_point(0, 0, '<main> at LOAD_DEREF')
setfield_gc(p24, 58, descr=<FieldS topaz.executioncontext.ExecutionContext.inst_last_instr 24>)
jump(p0, p1, p2, p4, p6, p9, p10, i57, p19, p22, p24, descr=TargetToken(140691297408272))
""")
def test_unboxed_int_storage(self, topaz, tmpdir):
traces = self.run(topaz, tmpdir, """
@i = 0
while @i < 10000
@i += 1
end
""")
self.assert_matches(traces[0].loop, """
label(p0, p1, p2, p4, p6, p7, p9, p10, p20, p26, f38, descr=TargetToken(140220342079424))
debug_merge_point(0, 0, '<main> at LOAD_SELF')
debug_merge_point(0, 0, '<main> at LOAD_INSTANCE_VAR')
i41 = convert_float_bytes_to_longlong(f38)
debug_merge_point(0, 0, '<main> at LOAD_CONST')
debug_merge_point(0, 0, '<main> at SEND')
setfield_gc(p20, 23, descr=<FieldS topaz.executioncontext.ExecutionContext.inst_last_instr 24>)
guard_not_invalidated(descr=<Guard0x7f8797bb8df0>)
p43 = force_token()
i45 = int_lt(i41, 10000)
guard_true(i45, descr=<Guard0x7f8797af8ba8>)
debug_merge_point(0, 0, '<main> at JUMP_IF_FALSE')
debug_merge_point(0, 0, '<main> at LOAD_SELF')
debug_merge_point(0, 0, '<main> at DUP_TOP')
debug_merge_point(0, 0, '<main> at LOAD_INSTANCE_VAR')
debug_merge_point(0, 0, '<main> at LOAD_CONST')
debug_merge_point(0, 0, '<main> at SEND')
p46 = force_token()
i48 = int_add(i41, 1)
debug_merge_point(0, 0, '<main> at STORE_INSTANCE_VAR')
f49 = convert_longlong_bytes_to_float(i48)
debug_merge_point(0, 0, '<main> at DISCARD_TOP')
debug_merge_point(0, 0, '<main> at JUMP')
debug_merge_point(0, 0, '<main> at LOAD_SELF')
i50 = arraylen_gc(p26, descr=<ArrayF 8>)
setfield_gc(p20, 39, descr=<FieldS topaz.executioncontext.ExecutionContext.inst_last_instr 24>)
setarrayitem_gc(p26, 0, f49, descr=<ArrayF 8>)
jump(p0, p1, p2, p4, p6, p7, p9, p10, p20, p26, f49, descr=TargetToken(140220342079424))
""")
def test_unboxed_float_storage(self, topaz, tmpdir):
traces = self.run(topaz, tmpdir, """
@data = 0.0
while @data < 10000.0
@data += 1.0
end
""")
self.assert_matches(traces[0].loop, """
label(p0, p1, p2, p4, p6, p7, p9, p10, p20, p26, f36, descr=TargetToken(139792504197136))
debug_merge_point(0, 0, '<main> at LOAD_SELF')
debug_merge_point(0, 0, '<main> at LOAD_INSTANCE_VAR')
debug_merge_point(0, 0, '<main> at LOAD_CONST')
debug_merge_point(0, 0, '<main> at SEND')
setfield_gc(p20, 23, descr=<FieldS topaz.executioncontext.ExecutionContext.inst_last_instr 24>)
guard_not_invalidated(descr=<Guard0x7f23fa9b8df0>)
p40 = force_token()
i42 = float_lt(f36, 10000.000000)
guard_true(i42, descr=<Guard0x7f23fa8f8ba8>)
debug_merge_point(0, 0, '<main> at JUMP_IF_FALSE')
debug_merge_point(0, 0, '<main> at LOAD_SELF')
debug_merge_point(0, 0, '<main> at DUP_TOP')
debug_merge_point(0, 0, '<main> at LOAD_INSTANCE_VAR')
debug_merge_point(0, 0, '<main> at LOAD_CONST')
debug_merge_point(0, 0, '<main> at SEND')
p43 = force_token()
f45 = float_add(f36, 1.000000)
debug_merge_point(0, 0, '<main> at STORE_INSTANCE_VAR')
debug_merge_point(0, 0, '<main> at DISCARD_TOP')
debug_merge_point(0, 0, '<main> at JUMP')
debug_merge_point(0, 0, '<main> at LOAD_SELF')
i46 = arraylen_gc(p26, descr=<ArrayF 8>)
setfield_gc(p20, 39, descr=<FieldS topaz.executioncontext.ExecutionContext.inst_last_instr 24>)
setarrayitem_gc(p26, 0, f45, descr=<ArrayF 8>)
jump(p0, p1, p2, p4, p6, p7, p9, p10, p20, p26, f45, descr=TargetToken(139792504197136))
""")
| bsd-3-clause | f273dd63ce7450d2edeeab7ba804e21d | 46.218978 | 103 | 0.585716 | 2.85481 | false | false | false | false |
topazproject/topaz | tests/objects/test_classobject.py | 3 | 3295 | import pytest
from ..base import BaseTopazTest
class TestClassObject(BaseTopazTest):
def test_name(self, space):
space.execute("Class")
def test_generated_subclass(self, space):
w_res = space.execute("""
class Foo
class Bar
class Baz
end
end
end
return Foo::Bar::Baz.name
""")
assert space.str_w(w_res) == "Foo::Bar::Baz"
def test_to_s(self, space):
w_res = space.execute("return 1.class.to_s")
assert space.str_w(w_res) == "Fixnum"
w_res = space.execute("return 1.class.class.to_s")
assert space.str_w(w_res) == "Class"
def test_anon_class_to_s(self, space):
w_res = space.execute("return Class.new.to_s")
assert space.str_w(w_res).startswith("#<Class:0x")
w_res = space.execute("return Class.new.new.to_s")
assert space.str_w(w_res).startswith("#<#<Class:0x")
@pytest.mark.xfail
def test_singletonclass_to_s(self, space):
w_res = space.execute("Class.new.singleton_class.to_s")
assert space.str_w(w_res).startswith("#<Class:#<Class:0x")
def test_anon_class_name(self, space):
w_res = space.execute("return Class.new.name")
assert w_res is space.w_nil
def test_anon_class_method_missing_raises(self, space):
with self.raises(space, "NoMethodError"):
space.execute("""
class A; end
Class.new.does_not_exist
""")
def test_singletonclass_name(self, space):
w_res = space.execute("Class.new.singleton_class.name")
assert w_res is space.w_nil
def test_class_new(self, space):
w_res = space.execute("return Class.new.superclass.name")
assert space.str_w(w_res) == "Object"
w_res = space.execute("return Class.new(String).superclass.name")
assert space.str_w(w_res) == "String"
def test_new(self, space):
w_res = space.execute("""
class X
end
return X.new
""")
w_cls = space.w_object.constants_w["X"]
assert space.getclass(w_res) is w_cls
w_res = space.execute("""
class X
def m
self
end
end
x = X.new
return [x, x.m]
""")
[w_x, w_xm] = space.listview(w_res)
assert w_xm is w_x
def test_superclass(self, space):
w_res = space.execute("return Object.superclass")
assert w_res is space.w_basicobject
w_res = space.execute("return BasicObject.superclass")
assert w_res is space.w_nil
def test_attr_accessor(self, space):
w_res = space.execute("""
class X
attr_accessor :a, :b, :c
def initialize a
@a = a
self.b = 25
end
end
x = X.new(3)
orig_a = x.a
x.a = 5
return [orig_a, x.a, x.b]
""")
assert [space.int_w(w_x) for w_x in space.listview(w_res)] == [3, 5, 25]
def test_attr_reader(self, space):
w_res = space.execute("""
class X
attr_reader :a
def initialize
@a = 5
end
end
return X.new.a
""")
assert space.int_w(w_res) == 5
| bsd-3-clause | 9c44ab4a68476a0ff8f6feb0948816f6 | 26.923729 | 80 | 0.537481 | 3.403926 | false | true | false | false |
klen/peewee_migrate | tests/test_migrator.py | 1 | 4700 | import peewee as pw
def test_migrator():
from playhouse.db_url import connect
from peewee_migrate import Migrator
database = connect("sqlite:///:memory:")
migrator = Migrator(database)
@migrator.create_table
class Customer(pw.Model):
name = pw.CharField()
assert Customer == migrator.orm["customer"]
@migrator.create_table
class Order(pw.Model):
number = pw.CharField()
uid = pw.CharField(unique=True)
customer_id = pw.ForeignKeyField(Customer, column_name="customer_id")
assert Order == migrator.orm["order"]
migrator.run()
migrator.add_columns(Order, finished=pw.BooleanField(default=False))
assert "finished" in Order._meta.fields
migrator.run()
migrator.drop_columns("order", "finished", "customer_id", "uid")
assert "finished" not in Order._meta.fields
assert not hasattr(Order, "customer_id")
assert not hasattr(Order, "customer_id_id")
migrator.run()
migrator.add_columns(Order, customer=pw.ForeignKeyField(Customer, null=True))
assert "customer" in Order._meta.fields
assert Order.customer.name == "customer"
migrator.run()
assert Order.customer.name == "customer"
migrator.rename_column(Order, "number", "identifier")
assert "identifier" in Order._meta.fields
migrator.run()
migrator.drop_not_null(Order, "identifier")
assert Order._meta.fields["identifier"].null
assert Order._meta.columns["identifier"].null
migrator.run()
migrator.add_default(Order, "identifier", 11)
assert Order._meta.fields["identifier"].default == 11
migrator.run()
migrator.change_columns(Order, identifier=pw.IntegerField(default=0))
assert Order.identifier.field_type == "INT"
migrator.run()
Order.create(identifier=55)
migrator.sql('UPDATE "order" SET identifier = 77;')
migrator.run()
order = Order.get()
assert order.identifier == 77
migrator.add_index(Order, "identifier", "customer")
migrator.run()
assert Order._meta.indexes
assert not Order.identifier.index
migrator.drop_index(Order, "identifier", "customer")
migrator.run()
assert not Order._meta.indexes
migrator.remove_fields(Order, "customer")
migrator.run()
assert not hasattr(Order, "customer")
migrator.add_index(Order, "identifier", unique=True)
migrator.run()
assert not Order.identifier.index
assert Order.identifier.unique
assert Order._meta.indexes
migrator.rename_table(Order, "orders")
assert migrator.orm["orders"]
assert migrator.orm["orders"]._meta.table_name == "orders"
migrator.run()
migrator.change_columns(Order, identifier=pw.IntegerField(default=0))
assert not Order._meta.indexes
def test_migrator_postgres():
"""
Ensure change_fields generates queries and
does not cause exception
"""
import peewee as pw
# Monkey patch psycopg2 connect
import psycopg2
from playhouse.db_url import connect
from peewee_migrate import Migrator
from .mocks import postgres
psycopg2.connect = postgres.MockConnection
database = connect("postgres:///fake")
migrator = Migrator(database)
@migrator.create_table
class User(pw.Model):
name = pw.CharField()
created_at = pw.DateField()
assert User == migrator.orm["user"]
# Date -> DateTime
migrator.change_fields("user", created_at=pw.DateTimeField())
migrator.run()
assert (
'ALTER TABLE "user" ALTER COLUMN "created_at" TYPE TIMESTAMP'
in database.cursor().queries
)
# Char -> Text
migrator.change_fields("user", name=pw.TextField())
migrator.run()
assert (
'ALTER TABLE "user" ALTER COLUMN "name" TYPE TEXT' in database.cursor().queries
)
def test_rename_column(Order, migrator):
Order = migrator.orm["order"]
migrator.rename_column("order", "customer", "user")
assert Order._meta.columns["user_id"]
assert Order._meta.fields["user"]
[operation] = migrator.ops
assert operation.args == ("order", "customer_id", "user_id")
# Rollback
migrator.run()
migrator.rename_column("order", "user", "customer")
[operation] = migrator.ops
assert operation.args == ("order", "user_id", "customer_id")
def test_rename_table(Customer, migrator):
migrator.rename_table("customer", "user")
[operation] = migrator.ops
assert operation.args == ("customer", "user")
class User(pw.Model):
name = pw.CharField()
age = pw.IntegerField()
from peewee_migrate.auto import diff_many
migrations = diff_many([migrator.orm["user"]], [User], migrator)
assert not migrations
| bsd-3-clause | e3df67426fc8345361ec6c0992f2160b | 27.484848 | 87 | 0.669149 | 3.790323 | false | false | false | false |
dokterbob/satchmo | satchmo/apps/satchmo_store/shop/templatetags/satchmo_currency.py | 14 | 1159 | from decimal import Decimal, InvalidOperation
from django import template
from django.utils.safestring import mark_safe
from l10n.utils import moneyfmt
from satchmo_utils.templatetags import get_filter_args
import logging
log = logging.getLogger("satchmo_currency")
register = template.Library()
def currency(value, args=""):
"""Convert a value to a money formatted string.
places: required number of places after the decimal point
curr: optional currency symbol before the sign (may be blank)
wrapcents:tag to wrap the part after the decimal point
Usage:
val|currency
val|currency:'places=2'
val|currency:'places=2:wrapcents=sup'
"""
if value == '' or value is None:
return value
args, kwargs = get_filter_args(args,
keywords=('places','curr', 'wrapcents'),
intargs=('places',), stripquotes=True)
try:
value = Decimal(str(value))
except InvalidOperation:
log.error("Could not convert value '%s' to decimal", value)
raise
return mark_safe(moneyfmt(value, **kwargs))
register.filter('currency', currency)
currency.is_safe = True
| bsd-3-clause | cb083b70ae68579f53684b1e307ae054 | 26.595238 | 68 | 0.686799 | 4.010381 | false | false | false | false |
dokterbob/satchmo | satchmo/apps/shipping/modules/canadapost/shipper.py | 5 | 7138 | '''
Canada Post Shipping Module
v0.1.1
'''
# Note, make sure you use decimal math everywhere!
from decimal import Decimal
from django.core.cache import cache
from django.template import loader, Context
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext as _
from livesettings import config_get_group, config_value
from shipping.modules.base import BaseShipper
import datetime
import logging
import urllib2
try:
from xml.etree.ElementTree import fromstring, tostring
except ImportError:
from elementtree.ElementTree import fromstring, tostring
log = logging.getLogger('canadapost.shipper')
class Shipper(BaseShipper):
def __init__(self, cart=None, contact=None, service_type=None):
self._calculated = False
self.cart = cart
self.contact = contact
if service_type:
self.service_type_code = service_type[0]
self.service_type_text = service_type[1]
else:
self.service_type_code = '99'
self.service_type_text = 'Uninitialized'
self.id = u'canadapost-%s' % (self.service_type_code)
def __str__(self):
'''
This is mainly helpful for debugging purposes
'''
return 'Canada Post'
def __unicode__(self):
'''
As is this.
'''
return 'Canada Post'
def description(self):
'''
A basic description that will be displayed to the user when
selecting their shipping options
'''
return _('Canada Post - %s' % self.service_type_text)
def cost(self):
'''
Complex calculations can be done here as long as the return
value is a decimal figure
'''
assert(self._calculated)
return(Decimal(self.charges))
def method(self):
'''
Describes the actual delivery service (Mail, FedEx, DHL, UPS, etc)
'''
return _('Canada Post')
def expectedDelivery(self):
'''
Can be a plain string or complex calcuation
returning an actual date
'''
if str(self.delivery_days) <> '1':
return _('%s business days' % self.delivery_days)
else:
return _('%s business day' % self.delivery_days)
def valid(self, order=None):
'''
Can do complex validation about whether or not this
option is valid. For example, may check to see if the
recipient is in an allowed country or location.
'''
return self.is_valid
def _process_request(self, connection, request):
'''
Post the data and return the XML response
'''
conn = urllib2.Request(url=connection, data=request.encode("utf-8"))
f = urllib2.urlopen(conn)
all_results = f.read()
self.raw = all_results
return(fromstring(all_results))
def calculate(self, cart, contact):
'''
Based on the chosen Canada Post method, we will do our call(s)
to Canada Post and see how much it will cost. We will also need
to store the results for further parsing and return via the
methods above.
'''
log.debug("Starting Canada Post calculations")
from satchmo_store.shop.models import Config
settings = config_get_group('shipping.modules.canadapost')
verbose = settings.VERBOSE_LOG.value
self.delivery_days = _('3 - 4') #Default setting for ground delivery
shop_details = Config.objects.get_current()
self.packaging = ''
self.is_valid = False
error = False
self.charges = 0
if not settings.CPCID.value:
log.warn("No CPCID found in settings")
return
if settings.LIVE.value:
connection = settings.CONNECTION.value
else:
connection = settings.CONNECTION_TEST.value
configuration = {
'cpcid': settings.CPCID.value,
'turn_around_time': settings.TURN_AROUND_TIME.value,
'packaging': self.packaging,
'ship_type': self.service_type_code,
'shop_details':shop_details,
}
c = Context({
'config': configuration,
'cart': cart,
'contact': contact
})
t = loader.get_template('shipping/canadapost/request.xml')
request = t.render(c)
self.is_valid = False
cache_key_response = "canadapost-cart-%s-response" % int(cart.id)
cache_key_request = "canadapost-cart-%s-request" % int(cart.id)
last_request = cache.get(cache_key_request)
tree = cache.get(cache_key_response)
if (last_request != request) or tree is None:
self.verbose_log("Requesting from Canada Post [%s]\n%s", cache_key_request, request)
cache.set(cache_key_request, request, 60)
tree = self._process_request(connection, request)
self.verbose_log("Got from Canada Post [%s]:\n%s", cache_key_response, self.raw)
needs_cache = True
else:
needs_cache = False
try:
status_code = tree.getiterator('statusCode')
status_val = status_code[0].text
self.verbose_log("Canada Post Status Code for cart #%s = %s", int(cart.id), status_val)
except AttributeError:
status_val = "-1"
if status_val == '1':
self.is_valid = False
self._calculated = False
all_rates = tree.getiterator('product')
for rate in all_rates:
self.verbose_log("Got product id from cp: %s", rate.attrib['id'])
if self.service_type_code == rate.attrib['id']:
self.charges = Decimal(rate.find('.//rate').text)
#YYYY-MM-DD
delivery_date = rate.find('.//deliveryDate').text
shipping_date = rate.find('.//shippingDate').text
self.delivery_days = datetime.date(
int(delivery_date[:4]),
int(delivery_date[5:7]),
int(delivery_date[8:])) - \
datetime.date(
int(shipping_date[:4]),
int(shipping_date[5:7]),
int(shipping_date[8:]))
self.delivery_days = self.delivery_days.days
self.is_valid = True
self._calculated = True
if not self.is_valid:
self.verbose_log("Canada Post Cannot find rate for code: %s [%s]", self.service_type_code, self.service_type_text)
def verbose_log(self, *args, **kwargs):
if config_value('shipping.modules.canadapost', 'VERBOSE_LOG'):
log.debug(*args, **kwargs)
| bsd-3-clause | c59f541a46de2707b9a800c813a1f222 | 33.483092 | 130 | 0.550714 | 4.312991 | false | false | false | false |
dokterbob/satchmo | satchmo/apps/shipping/modules/tieredquantity/models.py | 13 | 7113 | """
Tiered shipping models
"""
from decimal import Decimal
from django.conf import settings
from django.db import models
from django.utils.translation import get_language, ugettext_lazy as _
from shipping.modules.base import BaseShipper
import datetime
import logging
import operator
log = logging.getLogger('shipping.TieredQuantity')
class TieredPriceException(Exception):
    """Raised when no quantity tier can supply a price for the requested qty.

    Passing ``reason`` to ``Exception.__init__`` makes ``str(exc)`` and
    ``exc.args`` carry the message (the original left them empty).
    """

    def __init__(self, reason):
        super(TieredPriceException, self).__init__(reason)
        # Kept as an attribute for callers that inspect ``exc.reason``.
        self.reason = reason
class Shipper(BaseShipper):
    """Shipping backend that prices an order from quantity-based tiers.

    Wraps a :class:`Carrier`; the actual price lookup is delegated to
    ``Carrier.price()`` with the total shippable quantity.
    """

    def __init__(self, carrier):
        self.id = carrier.key
        self.carrier = carrier
        # NOTE(review): ``super(BaseShipper, self)`` deliberately(?) skips
        # BaseShipper.__init__ and calls its parent's instead.  Preserved
        # as-is; confirm against shipping.modules.base before changing.
        super(BaseShipper, self).__init__()

    def __str__(self):
        """Mainly helpful for debugging purposes."""
        return "Tiered_Shipper: %s" % self.id

    def description(self):
        """Basic description displayed to the user when selecting shipping."""
        return self.carrier.description

    def cost(self):
        """Return the shipping charge for the current cart.

        Only valid after ``calculate()`` has run (``self._calculated``).
        """
        assert(self._calculated)
        # Total quantity of shippable items only.
        qty = sum(
            (cartitem.quantity for cartitem in self.cart.cartitem_set.all()
             if cartitem.product.is_shippable),
            Decimal('0'))
        return self.carrier.price(qty)

    def method(self):
        """Describe the actual delivery service (Mail, FedEx, DHL, UPS, etc)."""
        return self.carrier.method

    def expectedDelivery(self):
        """Return the carrier's expected-delivery text."""
        return self.carrier.delivery

    def valid(self, order=None):
        """Check whether this order/cart can be shipped using this method.

        Returns True when the carrier has a tier covering the total
        shippable quantity, False otherwise.
        """
        qty = Decimal('0')  # default also guards the no-order/no-cart case
        if order:
            qty = sum(
                (item.quantity for item in order.orderitem_set.all()
                 if item.product.is_shippable),
                Decimal('0'))
        elif self.cart:
            qty = self.cart.numItems
        try:
            self.carrier.price(qty)
        except TieredPriceException:
            return False
        return True
class Carrier(models.Model):
    """A shipping carrier whose prices come from related quantity tiers.

    Display strings (name, description, method, delivery) live on related
    ``CarrierTranslation`` rows and are resolved per active language with
    sensible fallbacks.
    """

    key = models.SlugField(_('Key'))
    ordering = models.IntegerField(_('Ordering'), default=0)
    active = models.BooleanField(_('Active'), default=True)

    def _find_translation(self, language_code=None):
        """Return the best ``CarrierTranslation`` for ``language_code``.

        Fallback chain: exact code -> root language ("pt" for "pt-br") ->
        site default language -> any available translation -> None.
        """
        if not language_code:
            language_code = get_language()
        c = self.translations.filter(languagecode__exact = language_code)
        ct = c.count()
        if not c or ct == 0:
            pos = language_code.find('-')
            if pos>-1:
                short_code = language_code[:pos]
                log.debug("%s: Trying to find root language content for: [%s]", self.id, short_code)
                c = self.translations.filter(languagecode__exact = short_code)
                ct = c.count()
                if ct>0:
                    log.debug("%s: Found root language content for: [%s]", self.id, short_code)
        if not c or ct == 0:
            # Fall back to the site's default language.
            c = self.translations.filter(languagecode__istartswith = settings.LANGUAGE_CODE)
            ct = c.count()
        if not c or ct == 0:
            # Last resort: any translation at all.
            c = self.translations.all()
            ct = c.count()
        if ct > 0:
            return c[0]
        return None

    def _translated_attr(self, attr, default=""):
        """Return ``attr`` from the best translation, or ``default``."""
        trans = self._find_translation()
        if trans:
            return getattr(trans, attr)
        return default

    def delivery(self):
        """Get the delivery text, by language code, falling back intelligently."""
        return self._translated_attr('delivery')
    delivery = property(delivery)

    def description(self):
        """Get the description, by language code, falling back intelligently."""
        return self._translated_attr('description')
    description = property(description)

    def method(self):
        """Get the method text, by language code, falling back intelligently."""
        return self._translated_attr('method')
    method = property(method)

    def name(self):
        """Get the name, by language code; falls back to ``self.key``."""
        return self._translated_attr('name', self.key)
    name = property(name)

    def price(self, qty):
        """Return the price for ``qty``.

        Unexpired dated tiers act as specials and take precedence over
        undated tiers.  Raises ``TieredPriceException`` when no tier
        covers the quantity.
        """
        # first check for special discounts
        prices = self.tiers.filter(expires__isnull=False, quantity__lte=qty).exclude(expires__lt=datetime.date.today())
        if not prices.count() > 0:
            prices = self.tiers.filter(expires__isnull=True, quantity__lte=qty)
        if prices.count() > 0:
            # Get the price with the quantity closest to the one specified without going over
            return Decimal(prices.order_by('-quantity')[0].calculate_price(qty))
        log.debug("No quantity tier found for %s: qty=%d", self.id, qty)
        raise TieredPriceException('No price available')

    def __unicode__(self):
        return u"Carrier: %s" % self.name

    class Admin:
        ordering = ('key',)

    class Meta:
        pass
class CarrierTranslation(models.Model):
    # One translated set of display strings for a Carrier, keyed by language
    # code.  Carrier._find_translation selects the best matching row.
    carrier = models.ForeignKey('Carrier', related_name='translations')
    languagecode = models.CharField(_('language'), max_length=10, choices=settings.LANGUAGES, )
    name = models.CharField(_('Carrier'), max_length=50, )
    description = models.CharField(_('Description'), max_length=200)
    method = models.CharField(_('Method'), help_text=_("i.e. US Mail"), max_length=200)
    delivery = models.CharField(_('Delivery Days'), max_length=200)
class QuantityTier(models.Model):
    """A quantity-based shipping price tier for a Carrier.

    A tier applies when the order quantity is at least ``quantity``; the
    price charged is a flat ``handling`` fee plus ``price`` per item.
    A tier may optionally expire on a given date.
    """
    carrier = models.ForeignKey('Carrier', related_name='tiers')
    quantity = models.DecimalField(_("Min Quantity"), max_digits=18, decimal_places=6,
        help_text=_('Minimum qty in order for this to apply?'), )
    handling = models.DecimalField(_("Handling Price"), max_digits=10,
        decimal_places=2, )
    price = models.DecimalField(_("Shipping Per Item"), max_digits=10,
        decimal_places=2, )
    expires = models.DateField(_("Expires"), null=True, blank=True)

    def calculate_price(self, qty):
        """Return the total shipping price for ``qty`` items (handling + per-item)."""
        return self.handling + self.price * qty

    def __unicode__(self):
        return u"QuantityTier: %s @ %s" % (self.price, self.quantity)

    class Admin:
        # Fixed: the model has no 'min_total' field; order by the minimum
        # quantity instead so the admin listing does not reference a
        # nonexistent column.
        ordering = ('quantity', 'expires')

    class Meta:
        pass
import config
| bsd-3-clause | 0d52ac7a5223cdff87071abb03ef6681 | 30.334802 | 119 | 0.578237 | 4.316141 | false | false | false | false |
dokterbob/satchmo | satchmo/apps/shipping/modules/usps/shipper.py | 5 | 13163 | """
USPS Shipping Module
You must have a USPS account to use this module.
You may register at usps.com
This module uses the XML online tools for maximum flexibility. It is
primarily created for use in the US but reconfiguring for international
shipping or more advanced uses should be straightforward.
It is recommended that you refer to the USPS shipper developer documents
(available when you register at USPS) in order to tailor this to your
unique needs.
"""
# Note, make sure you use decimal math everywhere!
from decimal import Decimal
from django.core.cache import cache
from django.template import Context, loader
from django.utils.translation import ugettext as _
from l10n.models import Country
from livesettings import config_get_group, config_value
from shipping.modules.base import BaseShipper
import logging
import urllib2
try:
from xml.etree.ElementTree import fromstring, tostring
except ImportError:
from elementtree.ElementTree import fromstring, tostring
"""
The different class codes for each type of mail. Some types of mail have
several sub-options.
"""
CODES = {'0': 'FIRST CLASS',
'1': 'PRIORITY',
'16': 'PRIORITY',
'17': 'PRIORITY',
'22': 'PRIORITY',
'3': 'EXPRESS',
'13': 'EXPRESS',
'4': 'PARCEL',
'5': 'BPM',
'6': 'MEDIA',
'7': 'LIBRARY',
# these are here to avoid KeyErrors
'2': 'INTL',
'8': 'INTL',
'9': 'INTL',
'10': 'INTL',
'11': 'INTL',
'12': 'INTL',
'14': 'INTL',
'15': 'INTL',
}
"""
International service codes
(('14', 'First Class Mail International Large Envelope')),
(('15', 'First Class Mail International Package')),
(('1', 'Express Mail International (EMS)')),
(('4', 'Global Express Guaranteed')),
(('6', 'Global Express Guaranteed Non-Document Rectangular')),
(('7', 'Global Express Guaranteed Non-Document Non-Rectangular')),
(('10', 'Express Mail International (EMS) Flat-Rate Envelope')),
(('2', 'Priority Mail International')),
(('8', 'Priority Mail International Flat-Rate Envelope')),
(('9', 'Priority Mail International Flat-Rate Box')),
(('11', 'Priority Mail International Large Flat-Rate Box')),
(('12', 'USPS GXG Envelopes')),
"""
"""
These are the deliver estimates from the website at
http://www.usps.com/webtools/htm/DomMailServStandv1-0.htm#_Toc82841084
"""
ESTIMATES = {'FIRST CLASS': '3 - 5',
'PRIORITY': '1 - 3',
'EXPRESS': '1 - 2',
'PARCEL': '2 - 9',
'BPM': '2 - 9',
'MEDIA': '2 - 9',
'LIBRARY': '2 - 9',
'INTL': '',
}
"""
A list of API's for each type of mail service
"""
APIS = {'PRIORITY': 'PriorityMail',
'EXPRESS': 'ExpressMailCommitment',
'PARCEL': 'StandardB',
'BPM': 'StandardB',
'MEDIA': 'StandardB',
'LIBRARY': 'StandardB',
'INTL': '',
}
log = logging.getLogger('usps.shipper')
class Shipper(BaseShipper):
    """USPS rate shipper.

    Builds an XML rate request from templates, posts it to the USPS web
    tools API, and parses the response into a price and delivery estimate
    for one specific service type.
    """

    def __init__(self, cart=None, contact=None, service_type=None):
        self._calculated = False
        self.cart = cart
        self.contact = contact
        if service_type:
            # service_type is a (code, text) pair, e.g. ('1', 'Priority Mail')
            self.service_type_code = service_type[0]
            self.service_type_text = service_type[1]
        else:
            self.service_type_code = "99"
            self.service_type_text = "Uninitialized"
        self.id = u"USPS-%s-%s" % (self.service_type_code, self.service_type_text)
        self.raw = "NO DATA"
        self.exact_date = False
        #if cart or contact:
        #    self.calculate(cart, contact)

    def __str__(self):
        """
        This is mainly helpful for debugging purposes
        """
        return "U.S. Postal Service"

    def description(self):
        """
        A basic description that will be displayed to the user when selecting their shipping options
        """
        return _("USPS - %s" % self.service_type_text.replace('Int`l: ', ''))

    def cost(self):
        """
        Complex calculations can be done here as long as the return value is a decimal figure
        """
        assert(self._calculated)
        settings = config_get_group('shipping.modules.usps')
        # Add the configured handling fee (if any) on top of the USPS charge.
        if settings.HANDLING_FEE and float(str(settings.HANDLING_FEE)) > 0.0:
            self.charges = Decimal(self.charges) + Decimal(str(settings.HANDLING_FEE))
        return Decimal(str(self.charges))

    def method(self):
        """
        Describes the actual delivery service (Mail, FedEx, DHL, UPS, etc)
        """
        return _("USPS")

    def expectedDelivery(self):
        """
        Can be a plain string or complex calculation returning an actual date
        """
        if self.exact_date:
            # USPS committed to a specific date (or a service-specific phrase
            # for international shipments).
            if self.is_intl:
                return _('in %s' % self.delivery_days.lower())
            return _("by %s" % self.delivery_days.replace('-', ' '))
        elif self.delivery_days != "1":
            return _("in %s business days" % self.delivery_days)
        else:
            return _("in %s business day" % self.delivery_days)

    def valid(self, order=None):
        """
        Can do complex validation about whether or not this option is valid.
        For example, may check to see if the recipient is in an allowed country
        or location.
        """
        return self.is_valid

    def _process_request(self, connection, request, api=None):
        """
        Post the data and return the XML response
        """
        # determine which API to use
        # Fixed: compare to None with 'is', not '=='.
        if api is None:
            if self.is_intl:
                api = 'IntlRate'
            else:
                api = 'RateV3'
        data = 'API=%s&XML=%s' % (api, request.encode('utf-8'))
        conn = urllib2.Request(url=connection, data=data)
        f = urllib2.urlopen(conn)
        all_results = f.read()
        # NOTE(review): this logs every raw USPS response at ERROR level —
        # looks like leftover debugging; consider demoting to debug.
        log.error(all_results)
        return (fromstring(all_results))

    def render_template(self, template, cart=None, contact=None):
        """Render the request XML template for this cart/contact.

        Side effect: sets self.api to the delivery-commitment API name for
        this mail class (or None when no commitment lookup applies).
        """
        from satchmo_store.shop.models import Config
        shop_details = Config.objects.get_current()
        settings = config_get_group('shipping.modules.usps')
        if not self.is_intl:
            mail_type = CODES[self.service_type_code]
            # An international-only code on a domestic request yields nothing.
            if mail_type == 'INTL': return ''
            if mail_type == 'FIRST CLASS':
                self.api = None
            else:
                self.api = APIS[mail_type]
        else:
            mail_type = None
            self.api = None
        # calculate the weight of the entire order
        weight = Decimal('0.0')
        for item in cart.cartitem_set.all():
            if item.product.smart_attr('weight'):
                weight += item.product.smart_attr('weight') * item.quantity
        self.verbose_log('WEIGHT: %s' % weight)
        # I don't know why USPS made this one API different this way...
        if self.api == 'ExpressMailCommitment':
            zip_ending = 'ZIP'
        else:
            zip_ending = 'zip'
        # get the shipping country (for the international orders)
        ship_country = contact.shipping_address.country.printable_name
        configuration = {
            'userid': settings.USER_ID.value,
            'password': settings.USER_PASSWORD.value,
            'container': settings.SHIPPING_CONTAINER.value,
            'ship_type': mail_type,
            'shop_details': shop_details
        }
        c = Context({
            'config': configuration,
            'cart': cart,
            'contact': contact,
            'is_international': self.is_intl,
            'api': self.api,
            'weight': weight,
            'zip': zip_ending,
            'country': ship_country,
            'first_class_types': ['LETTER', 'FLAT', 'PARCEL']
        })
        t = loader.get_template(template)
        return t.render(c)

    def calculate(self, cart, contact):
        """
        Based on the chosen USPS method, we will do our call to USPS and see how
        much it will cost. We will also need to store the results for further
        parsing and return via the methods above
        """
        from satchmo_store.shop.models import Config
        settings = config_get_group('shipping.modules.usps')
        shop_details = Config.objects.get_current()
        # International when the ship-to country differs from the shop's.
        self.is_intl = contact.shipping_address.country.iso2_code != shop_details.country.iso2_code
        self.delivery_days = ESTIMATES[CODES[self.service_type_code]]
        if not self.is_intl:
            template = 'shipping/usps/request.xml'
        else:
            template = 'shipping/usps/request_intl.xml'
        request = self.render_template(template, cart, contact)
        self.is_valid = False
        if settings.LIVE.value:
            connection = settings.CONNECTION.value
        else:
            connection = settings.CONNECTION_TEST.value
        # Cache the parsed response per-cart so repeated page views don't
        # re-query USPS for an unchanged cart.
        cache_key_response = "usps-cart-%s-response" % int(cart.id)
        cache_key_request = "usps-cart-%s-request" % int(cart.id)
        last_request = cache.get(cache_key_request)
        tree = cache.get(cache_key_response)
        if (last_request != request) or tree is None:
            self.verbose_log("Requesting from USPS [%s]\n%s", cache_key_request, request)
            cache.set(cache_key_request, request, 60)
            tree = self._process_request(connection, request)
            self.verbose_log("Got from USPS [%s]:\n%s", cache_key_response, self.raw)
            needs_cache = True
        else:
            needs_cache = False
        errors = tree.getiterator('Error')
        # if USPS returned no error, return the prices
        # Fixed: identity comparison with 'is' instead of '=='.
        if errors is None or len(errors) == 0:
            # check for domestic results first
            all_packages = tree.getiterator('RateV3Response')
            # if there are none, revert to international results
            if len(all_packages) == 0:
                all_packages = tree.getiterator('IntlRateResponse')
                for package in all_packages:
                    for service in package.getiterator('Service'):
                        #self.verbose_log('%s vs %s' % (service.attrib['ID'], self.service_type_code))
                        if service.attrib['ID'] == self.service_type_code and \
                            self.service_type_text.startswith('Int`l: '):
                            self.charges = service.find('.//Postage/').text
                            self.delivery_days = service.find('.//SvcCommitments/').text
                            #self.verbose_log('%s %s' % (self.charges, self.delivery_days))
                            self.is_valid = True
                            self._calculated = True
                            self.exact_date = True
                            #if needs_cache:
                            #    cache.set(cache_key_response, tree, 60)
            else:
                for package in all_packages:
                    for postage in package.getiterator('Postage'):
                        if postage.attrib['CLASSID'] == self.service_type_code and \
                            not self.service_type_text.startswith('Int`l: '):
                            self.charges = postage.find('.//Rate/').text
                            # Now try to figure out how long it would take for this delivery
                            if self.api:
                                delivery = self.render_template('shipping/usps/delivery.xml', cart, contact)
                                del_tree = self._process_request(connection, delivery, self.api)
                                parent = '%sResponse' % self.api
                                del_iter = del_tree.getiterator(parent)
                                if len(del_iter):
                                    i = del_iter[0]
                                    # express mail usually has a date commitment
                                    if self.api == 'ExpressMailCommitment':
                                        key = './/Date/'
                                        self.exact_date = True
                                    else:
                                        key = './/Days/'
                                    # Fixed: identity comparison with 'is not None'.
                                    if i.find(key) is not None:
                                        self.delivery_days = i.find(key).text
                            self.is_valid = True
                            self._calculated = True
                            #if needs_cache:
                            #    cache.set(cache_key_response, tree, 60)
        else:
            error = errors[0]
            err_num = error.find('.//Number').text
            source = error.find('.//Source').text
            description = error.find('.//Description').text
            log.info("USPS Error: Code %s: %s" % (err_num, description))

    def verbose_log(self, *args, **kwargs):
        """Log at debug level only when VERBOSE_LOG is enabled in settings."""
        if config_value('shipping.modules.usps', 'VERBOSE_LOG'):
            log.debug(*args, **kwargs)
| bsd-3-clause | 01afecf8cf0b5e164b8f1ecfa02c8862 | 36.933718 | 108 | 0.538631 | 4.153676 | false | false | false | false |
dokterbob/satchmo | satchmo/apps/satchmo_ext/brand/templatetags/satchmo_brands.py | 13 | 1239 | """Tags for manipulating brands on templates."""
from django.template import Library, Node, TemplateSyntaxError
from satchmo_ext.brand.models import Brand
from satchmo_utils.templatetags import get_filter_args
register = Library()
class BrandListNode(Node):
"""Template Node tag which pushes the brand list into the context"""
def __init__(self, var, nodelist):
self.var = var
self.nodelist = nodelist
def render(self, context):
brands = Brand.objects.active()
context[self.var] = brands
context.push()
context[self.var] = brands
output = self.nodelist.render(context)
context.pop()
return output
def do_brandlistnode(parser, token):
    """Push the brand list into the context using the given variable name.

    Sample usage::

        {% brand_list as var %}
    """
    args = token.split_contents()
    if len(args) != 3:
        # Fixed error message: this tag takes no slug argument, only
        # "as varname".
        raise TemplateSyntaxError("%r tag expecting 'as varname', got: %s" % (args[0], args))
    var = args[2]
    nodelist = parser.parse(('endbrand_list',))
    parser.delete_first_token()
    return BrandListNode(var, nodelist)
register.tag('brand_list', do_brandlistnode)
| bsd-3-clause | f8079a0fbdd9a04c24f18ac7ea5037cc | 29.219512 | 100 | 0.639225 | 3.971154 | false | false | false | false |
dokterbob/satchmo | satchmo/apps/shipping/modules/fedex/shipper.py | 4 | 10772 | '''
FedEx Shipping Module
v0.4
By Neum Schmickrath - www.pageworthy.com
You must have a Fedex account to use this module.
You may register at fedex.com
'''
from decimal import Decimal
from django.utils.translation import ugettext as _
from django.utils.safestring import mark_safe
from django.template import loader, Context
from django.core.cache import cache
from shipping.modules.base import BaseShipper
from shipping import signals
from livesettings import config_get_group
import urllib2
from xml.dom import minidom
import logging
log = logging.getLogger('fedex.shipper')
class Shipper(BaseShipper):
    '''
    FedEx rate shipper.

    Renders an XML rate request per box (or one request for the whole cart
    in single-box mode), posts it to FedEx, and accumulates the returned
    charges for one specific service type.
    '''

    def __init__(self, cart=None, contact=None, service_type=None):
        self._calculated = False
        self.cart = cart
        self.contact = contact
        self.raw_results = '(not processed)'
        if service_type:
            # service_type is a (code, text) pair.
            self.service_type_code = service_type[0]
            self.service_type_text = service_type[1]
        else:
            self.service_type_code = '99'
            self.service_type_text = 'Uninitialized'
        # Had to edit this so the shipping name did not error out for being more than 30 characters. Old code is commented out.
        #self.id = u'FedEx-%s-%s' % (self.service_type_code, self.service_type_text)
        self.id = u'%s' % (self.service_type_text)
        #if cart or contact:
        #    self.calculate(cart, contact)

    def __str__(self):
        '''
        This is mainly helpful for debugging purposes
        '''
        return 'FedEx'

    def __unicode__(self):
        '''
        As is this.
        '''
        return 'FedEx'

    def description(self):
        '''
        A basic description that will be displayed to the user when
        selecting their shipping options
        '''
        return _('FedEx - %s' % self.service_type_text)

    def cost(self):
        '''
        Complex calculations can be done here as long as the return
        value is a decimal figure
        '''
        assert(self._calculated)
        return(Decimal(self.charges))

    def method(self):
        '''
        Describes the actual delivery service (Mail, FedEx, DHL, UPS, etc)
        '''
        return _('FedEx')

    def expectedDelivery(self):
        '''
        Can be a plain string or complex calculation
        returning an actual date
        '''
        # Fixed: use != instead of the <> operator, which was removed in
        # Python 3.
        if self.delivery_days != '1':
            return _('%s business days' % self.delivery_days)
        else:
            return _('%s business day' % self.delivery_days)

    def valid(self, order=None):
        '''
        Can do complex validation about whether or not this
        option is valid. For example, may check to see if the
        recipient is in an allowed country or location.
        '''
        return self.is_valid

    def _check_for_error(self, response):
        '''
        Check XML response, see if it indicates an error.
        Expects 'response' to already have been run through
        minidom.parseString()

        Returns (message, code) on error, False when the response is clean.
        '''
        if response.getElementsByTagName('Error'):
            # we have an error!
            error_code = response.getElementsByTagName('Error')[0].getElementsByTagName('Code')[0].firstChild.nodeValue
            error_mesg = response.getElementsByTagName('Error')[0].getElementsByTagName('Message')[0].firstChild.nodeValue
            log.info('Fedex Error: %s - Code: %s', error_mesg, error_code)
            return (error_mesg, error_code)
        else:
            # all clear.
            return False

    def _process_request(self, connection, request):
        '''
        Post the data and return the XML response
        '''
        conn = urllib2.Request(url=connection, data=request)
        f = urllib2.urlopen(conn)
        all_results = f.read()
        self.raw_response = all_results
        return(minidom.parseString(all_results))

    def calculate(self, cart, contact):
        '''
        Based on the chosen Fedex method, we will do our call(s)
        to FedEx and see how much it will cost. We will also need
        to store the results for further parsing and return via the
        methods above.
        '''
        log.debug("Starting fedex calculations")
        from satchmo_store.shop.models import Config
        settings = config_get_group('shipping.modules.fedex')
        verbose = settings.VERBOSE_LOG.value
        self.delivery_days = _('3 - 4') #Default setting for ground delivery
        shop_details = Config.objects.get_current()
        self.packaging = ''
        # FedEx Ground Home Delivery Packaging must be YOURPACKAGING only.
        if self.service_type_code in ('FEDEXGROUND', 'GROUNDHOMEDELIVERY'):
            self.packaging = 'YOURPACKAGING'
        else:
            self.packaging = settings.SHIPPING_PACKAGE.value
        if verbose:
            log.debug('Calculating fedex with type=%s, packaging=%s', self.service_type_code, self.packaging)
        self.is_valid = False
        error = False
        if not settings.ACCOUNT.value:
            log.warn("No fedex account found in settings")
            return
        if not settings.METER_NUMBER.value:
            log.warn("No fedex meter number found in settings")
            return
        configuration = {
            'account': settings.ACCOUNT.value,
            'meter': settings.METER_NUMBER.value,
            'packaging': self.packaging,
            'ship_type': self.service_type_code,
            'shop_details':shop_details,
        }
        if settings.LIVE.value:
            connection = settings.CONNECTION.value
        else:
            connection = settings.CONNECTION_TEST.value
        self.charges = 0
        box_weight_units = "LB"
        # FedEx requires that the price be formatted to 2 decimal points.
        # e.g., 1.00, 10.40, 3.50
        # They also require that the weight be one decimal point.
        # e.g., 1.0, 2.3, 10.4
        if settings.SINGLE_BOX.value:
            if verbose:
                log.debug("Using single-box method for fedex calculations.")
            # Collapse the whole cart into a single box: sum the prices and
            # weights of every shippable item.
            box_price = Decimal("0.00")
            box_weight = Decimal("0.00")
            for product in cart.get_shipment_list():
                box_price += product.unit_price
                if product.weight is None:
                    log.warn("No weight on product (skipping for ship calculations): %s", product)
                else:
                    box_weight += product.weight
                    if product.weight_units and product.weight_units != "":
                        box_weight_units = product.weight_units
            if box_weight < Decimal("0.1"):
                log.debug("Total box weight too small, defaulting to 0.1")
                box_weight = Decimal("0.1")
            shippingdata = {
                'config': configuration,
                'box_price': '%.2f' % box_price,
                'box_weight' : '%.1f' % box_weight,
                'box_weight_units' : box_weight_units.upper(),
                'contact': contact,
                'shipping_address' : shop_details,
                'shipping_phone' : shop_details.phone,
                'shipping_country_code' : shop_details.country.iso2_code
            }
            signals.shipping_data_query.send(Shipper, shipper=self, cart=cart, shippingdata=shippingdata)
            c = Context(shippingdata)
            t = loader.get_template('shipping/fedex/request.xml')
            request = t.render(c)
            try:
                response = self._process_request(connection, request)
                error = self._check_for_error(response)
                if verbose:
                    log.debug("Fedex request: %s", request)
                    log.debug("Fedex response: %s", self.raw_response)
                if not error:
                    this_charge = float(response.documentElement.getElementsByTagName('NetCharge')[0].firstChild.nodeValue)
                    this_discount = float(response.documentElement.getElementsByTagName('EffectiveNetDiscount')[0].firstChild.nodeValue)
                    self.delivery_days = response.documentElement.getElementsByTagName('TimeInTransit')[0].firstChild.nodeValue
                    total_cost = this_charge + this_discount
                    self.charges += total_cost
            except urllib2.URLError:
                log.warn("Error opening url: %s", connection)
                error = True
        else:
            # process each shippable separately
            # I'm not certain why FedEx implemented their 'Web Service'
            # this way. However, you can't give FedEx a list of boxes
            # and get back a list of prices (as you can with UPS).
            # Each box has to be a completely new transaction - that
            # is, a separate POST to their server.
            #
            # So, to simulate this functionality, and return a total
            # price, we have to loop through all of our items, and
            # pray the customer isn't ordering a thousand boxes of bagels.
            for product in cart.get_shipment_list():
                c = Context({
                    'config': configuration,
                    'box_weight' : '%.1f' % (product.weight or 0.0),
                    'box_weight_units' : product.weight_units and product.weight_units.upper() or 'LB',
                    'box_price' : '%.2f' % product.unit_price,
                    'contact': contact,
                })
                t = loader.get_template('shipping/fedex/request.xml')
                request = t.render(c)
                response = self._process_request(connection, request)
                error = self._check_for_error(response)
                if verbose:
                    log.debug("Fedex request: %s", request)
                    log.debug("Fedex response: %s", self.raw_response)
                if not error:
                    this_charge = float(response.documentElement.getElementsByTagName('NetCharge')[0].firstChild.nodeValue)
                    this_discount = float(response.documentElement.getElementsByTagName('EffectiveNetDiscount')[0].firstChild.nodeValue)
                    self.delivery_days = response.documentElement.getElementsByTagName('TimeInTransit')[0].firstChild.nodeValue
                    total_cost = this_charge + this_discount
                    self.charges += total_cost
                else:
                    break
        if not error:
            self.charges = str(self.charges)
            self.is_valid = True
            self._calculated = True
| bsd-3-clause | 143bb423f48685ef73c81a77cbce0fff | 34.906667 | 136 | 0.567118 | 4.45124 | false | false | false | false |
dokterbob/satchmo | satchmo/apps/satchmo_store/shop/views/orders.py | 13 | 1715 | from django.contrib.auth.decorators import login_required
from django.shortcuts import render_to_response
from django.template import RequestContext
from django.utils.translation import ugettext_lazy as _
from satchmo_store.contact.models import Contact
from satchmo_store.shop.models import Order
from satchmo_utils.views import bad_or_missing
from livesettings import config_value
@login_required
def order_history(request):
    """Render the authenticated user's order history, newest first."""
    contact = None
    orders = None
    try:
        contact = Contact.objects.from_request(request, create=False)
    except Contact.DoesNotExist:
        pass
    else:
        orders = Order.objects.filter(contact=contact).order_by('-time_stamp')
    ctx = RequestContext(request, {
        'contact': contact,
        'default_view_tax': config_value('TAX', 'DEFAULT_VIEW_TAX'),
        'orders': orders,
    })
    return render_to_response('shop/order_history.html', context_instance=ctx)
@login_required
def order_tracking(request, order_id):
    """Render tracking details for one of the authenticated user's orders."""
    contact = None
    order = None
    try:
        contact = Contact.objects.from_request(request, create=False)
    except Contact.DoesNotExist:
        pass
    else:
        try:
            order = Order.objects.get(id__exact=order_id, contact=contact)
        except Order.DoesNotExist:
            pass
    if order is None:
        return bad_or_missing(request, _("The order you have requested doesn't exist, or you don't have access to it."))
    ctx = RequestContext(request, {
        'default_view_tax': config_value('TAX', 'DEFAULT_VIEW_TAX'),
        'contact': contact,
        'order': order,
    })
    return render_to_response('shop/order_tracking.html', context_instance=ctx)
dokterbob/satchmo | satchmo/apps/satchmo_utils/cookies.py | 13 | 3056 | """From http://www.davidcramer.net/code/62/set-cookies-without-a-response-in-django.html
Used with permission. Thank you David Cramer.
To install, add the prehandler to the beginning of your middleware list, and the posthandler to the end. Then, you can use request.COOKIES.get|set|delete anywhere in the code and they'll be automatically added to the response.
"""
from Cookie import SimpleCookie, Morsel
import copy
class CookiePreHandlerMiddleware(object):
    """
    This middleware modifies request.COOKIES and adds a set and delete method.

    `set` matches django.http.HttpResponse.set_cookie
    `delete` matches django.http.HttpResponse.delete_cookie

    This should be the first middleware you load.
    """
    def process_request(self, request):
        # Replace the plain dict of incoming cookies with a CookieHandler,
        # which supports set()/delete() like an HttpResponse.
        cookies = CookieHandler()
        for k, v in request.COOKIES.iteritems():
            cookies[k] = str(v)
        request.COOKIES = cookies
        # Snapshot the cookies so the post-handler can detect which ones
        # were modified during request processing.
        request._orig_cookies = copy.deepcopy(request.COOKIES)
class CookiePostHandlerMiddleware(object):
    """
    This middleware updates the response with all cookies modified during
    the request.

    This should be the last middleware you load.
    """
    def process_response(self, request, response):
        # Copy onto the response only the cookies that changed since the
        # pre-handler snapshot was taken.
        if hasattr(request, '_orig_cookies') and request.COOKIES != request._orig_cookies:
            for k,v in request.COOKIES.iteritems():
                if request._orig_cookies.get(k) != v:
                    dict.__setitem__(response.cookies, k, v)
        return response
class StringMorsel(Morsel):
    """A Morsel whose string form is its plain value, with value-based
    equality against strings and output-based equality against Morsels."""

    def __str__(self):
        return self.value

    def __repr__(self):
        return str(self)

    def __eq__(self, other):
        if isinstance(other, str):
            return str(self) == other
        if isinstance(other, Morsel):
            return self.output() == other.output()
        return False

    def __ne__(self, other):
        # Exact negation of __eq__ for every branch.
        return not self.__eq__(other)
class CookieHandler(SimpleCookie):
    """A SimpleCookie storing StringMorsel values, with set()/delete()
    helpers matching HttpResponse.set_cookie/delete_cookie."""

    def __set(self, key, real_value, coded_value):
        """Private method for setting a cookie's value"""
        morsel = self.get(key, StringMorsel())
        morsel.set(key, real_value, coded_value)
        dict.__setitem__(self, key, morsel)

    def __setitem__(self, key, value):
        """Dictionary style assignment."""
        real, coded = self.value_encode(value)
        self.__set(key, real, coded)

    def set(self, key, value='', max_age=None, expires=None, path='/', domain=None, secure=None):
        """Set a cookie; signature mirrors HttpResponse.set_cookie."""
        self[key] = value
        # Apply each non-None attribute in the same order as before.
        for attr, val in (('max-age', max_age), ('path', path),
                          ('domain', domain), ('secure', secure),
                          ('expires', expires)):
            if val is not None:
                self[key][attr] = val

    def delete(self, key, path='/', domain=None):
        """Expire a cookie; signature mirrors HttpResponse.delete_cookie."""
        self[key] = ''
        if path is not None:
            self[key]['path'] = path
        if domain is not None:
            self[key]['domain'] = domain
        self[key]['expires'] = 0
        self[key]['max-age'] = 0
dokterbob/satchmo | satchmo/apps/satchmo_ext/newsletter/simple.py | 13 | 1676 | """ Just tracks subscriptions, nothing more. """
from satchmo_ext.newsletter.models import Subscription
from django.utils.translation import ugettext as _
import logging
log = logging.getLogger('simple newsletter')
def is_subscribed(contact):
    """Return True if the contact's email address currently has a subscription."""
    return Subscription.email_is_subscribed(contact.email)
def update_contact(contact, subscribe, attributes=None):
    """Subscribe or unsubscribe a contact's email address.

    `contact` -- the contact whose email is affected.
    `subscribe` -- True to subscribe, False to unsubscribe.
    `attributes` -- optional mapping of attribute name/value pairs to store
    on the subscription.

    Returns a localized status message describing what happened.
    """
    # Fixed: use a None sentinel instead of a mutable default argument.
    if attributes is None:
        attributes = {}
    email = contact.email
    current = Subscription.email_is_subscribed(email)
    attributesChanged = False
    sub = None
    if attributes:
        sub, created = Subscription.objects.get_or_create(email=email)
        if created:
            attributesChanged = True
        else:
            # Snapshot the existing attributes so we can tell whether the
            # update actually changed anything.
            oldAttr = [(a.name,a.value) for a in sub.attributes.all()]
            oldAttr.sort()
        sub.update_attributes(attributes)
        newAttr = [(a.name,a.value) for a in sub.attributes.all()]
        newAttr.sort()
        if not created:
            attributesChanged = oldAttr != newAttr
    if current == subscribe:
        # No state change needed; report what (if anything) was updated.
        if subscribe:
            if attributesChanged:
                result = _("Updated subscription for %(email)s.")
            else:
                result = _("Already subscribed %(email)s.")
        else:
            result = _("Already removed %(email)s.")
    else:
        if not sub:
            sub, created = Subscription.objects.get_or_create(email=email)
        sub.subscribed = subscribe
        sub.save()
        # Lazy %-style logging args: the message is only formatted if the
        # debug level is enabled.
        log.debug("Subscription now: %s", sub)
        if subscribe:
            result = _("Subscribed: %(email)s")
        else:
            result = _("Unsubscribed: %(email)s")
    return result % { 'email' : email }
| bsd-3-clause | cad8c28d026cda3c263924c88cdb8fa2 | 30.037037 | 74 | 0.588305 | 4.422164 | false | false | false | false |
dokterbob/satchmo | scripts/clonesatchmo.py | 1 | 6127 | #!/usr/bin/env python
"""
This is the installation script for Satchmo. It will create the base Satchmo configuration.
Before running this script, you must have python and pip installed.
It is also recommended that you install Python Imaging using your distribution's
package method.
The simplest way to install Satchmo would be:
pip install -r http://bitbucket.org/chris1610/satchmo/raw/tip/scripts/requirements.txt
pip install -e hg+http://bitbucket.org/chris1610/satchmo/#egg=satchmo
Then run:
python clonesatchmo.py
"""
import os
import shutil
from random import choice
import re
from optparse import OptionParser
import string
__VERSION__ = "0.2"
def parse_command_line():
    """Parse command-line options for the clone script.

    Returns the (options, args) pair from OptionParser.parse_args().
    """
    parser = OptionParser(
        usage='usage: %prog [options]',
        version='Version: %prog ' + '%s' % __VERSION__,
    )
    parser.add_option('-s', '--site', action='store', type='string', default='store',
        dest='site_name', help="Top level directory name for the site. [default: %default]")
    parser.add_option('-l', '--localsite', action='store', type='string', default='localsite',
        dest='local_site_name', help="Name for the local application stub. [default: %default]")
    parser.add_option('--skel', action='store', type='string', default=None,
        dest='skeleton_dir', help="Path to the skeleton directory")
    return parser.parse_args()
def check_skeleton_dir(skel_dir):
    """
    Verify that the skeleton directory exists and that it points
    to a location with a localsite subdir.

    Returns a (success, message) tuple; message is empty on success.
    """
    # None means "use the built-in skeleton", which is always acceptable.
    if skel_dir is None:
        return (True, "")
    if not os.path.isdir(skel_dir):
        return (False, "Invalid skeleton directory. Path should be /path/to/satchmo/projects/skeleton")
    if not os.path.isdir(os.path.join(skel_dir, 'localsite')):
        return (False, "Skeleton directory does not contain localsite subdirectory. Path should be /path/to/satchmo/projects/skeleton")
    return (True, "")
def install_pil(requirements='Pillow'):
    """Install the Python Imaging Library via pip.

    Fixed: the previous version referenced an undefined global
    ``pil_requirements`` and raised NameError whenever it was called.
    The requirement spec is now a parameter defaulting to Pillow, the
    maintained PIL fork; callers using the old zero-argument form still work.
    """
    os.system('pip install %s' % requirements)
def create_satchmo_site(site_name, skeleton_dir):
    """
    If we are passed a skeleton_dir, use it
    If we aren't we assume the script is being run from the source tree so
    we try to find it.
    If this doesn't work, let the user know they need to specify it manually

    Returns a (success, message) tuple; message is empty on success.
    """
    if skeleton_dir:
        src_dir = os.path.abspath(skeleton_dir)
    else:
        # No explicit skeleton; assume a source checkout with the skeleton
        # at a fixed location relative to this script.
        clone_dir = os.path.dirname(__file__)
        src_dir = os.path.abspath(os.path.join(clone_dir,'../satchmo/projects/skeleton'))
        result,msg = check_skeleton_dir(src_dir)
        if not result:
            return (False, msg)
    dest_dir = os.path.join('./',site_name)
    # Copy the whole skeleton project into the new site directory.
    shutil.copytree(src_dir, dest_dir)
    return (True, "")
def customize_files(site_name, local_site_name):
    """
    We need to make a couple of change to the files copied from the skeleton directory.
    Set the SECRET_KEY to a random value
    Set the ROOT_URLCONF
    Set the DJANGO_PROJECT
    Set the DJANGO_SETTINGS_MODULE
    We also need to change the directory name to local_site_name
    """
    dest_dir = os.path.join('./',site_name)
    # Create a random SECRET_KEY hash, and put it in the main settings.
    main_settings_file = os.path.join(dest_dir, 'settings.py')
    settings_contents = open(main_settings_file, 'r').read()
    fp = open(main_settings_file, 'w')
    secret_key = ''.join([choice('abcdefghijklmnopqrstuvwxyz0123456789!@#$%^&*(-_=+)') for i in range(50)])
    # The lookbehind regexes below match the closing quote of an empty
    # setting value (e.g. SECRET_KEY = '') and replace it with the new
    # value plus its closing quote.
    settings_contents = re.sub(r"(?<=SECRET_KEY = ')'", secret_key + "'", settings_contents)
    # Configure the other variables that need to be modified
    root_urlconf = site_name + '.urls'
    settings_contents = re.sub(r"(?<=ROOT_URLCONF = ')'", root_urlconf + "'",settings_contents)
    django_settings = site_name + '.settings'
    settings_contents = re.sub(r"(?<=DJANGO_PROJECT = ')'", site_name + "'",settings_contents)
    settings_contents = re.sub(r"(?<=DJANGO_SETTINGS_MODULE = ')'", django_settings + "'",settings_contents)
    # Point the installed-apps entry at the renamed local application.
    local_app = "%s.%s" % (site_name,local_site_name)
    settings_contents = settings_contents.replace("simple.localsite",local_app)
    fp.write(settings_contents)
    fp.close()
    # rename the local_app directory
    os.rename(os.path.join(dest_dir,'localsite'), os.path.join(dest_dir,local_site_name))
def setup_satchmo(site_name, local_site_name):
    """
    Run the final satchmo management commands for the freshly created site:
    copy the static files, create the database tables and load the
    localization, store and pricing data.
    """
    manage_commands = (
        'satchmo_copy_static',
        'syncdb',
        'satchmo_load_l10n',
        'satchmo_load_store',
        'satchmo_rebuild_pricing',
    )
    for command in manage_commands:
        os.system('cd %s && python manage.py %s' % (site_name, command))
if __name__ == '__main__':
    opts, args = parse_command_line()
    errors = []
    dest_dir = os.path.join('./', opts.site_name)
    # Collect every configuration problem before aborting so the user sees
    # all of them in one pass.
    result, msg = check_skeleton_dir(opts.skeleton_dir)
    if not result:
        errors.append(msg)
    if os.path.isdir(dest_dir):
        errors.append("The destination directory already exists. This script can only be used to create new projects.")
    try:
        # Verify that PIL's Image module is importable.  The previous
        # "import PIL as Image" only checked that the PIL package existed,
        # not that the Image module satchmo actually needs is usable.
        from PIL import Image
    except ImportError:
        errors.append("The Python Imaging Library is not installed. Install from your distribution binaries.")
    if errors:
        for error in errors:
            print(error)
        exit()
    print("Creating the Satchmo Application")
    result, msg = create_satchmo_site(opts.site_name, opts.skeleton_dir)
    if not result:
        print(msg)
        exit()
    print("Customizing the files")
    customize_files(opts.site_name, opts.local_site_name)
    print("Performing initial data synching")
    setup_satchmo(opts.site_name, opts.local_site_name)
    print("Store installation complete.")
    print("You may run the server by typing: \n cd %s \n python manage.py runserver" % opts.site_name)
| bsd-3-clause | fa49a8b9effea257a7c1aeec510361ec | 39.309211 | 139 | 0.654643 | 3.601999 | false | false | false | false |
dokterbob/satchmo | satchmo/apps/product/prices.py | 13 | 2704 | from decimal import Decimal
from l10n.utils import moneyfmt
import datetime
def get_product_quantity_adjustments(product, qty=1, parent=None):
    """Get the list of price adjustments for a product at a given quantity.

    Finds the best non-expired quantity-discount price for ``product`` and
    returns its adjustments.  If the product itself yields no adjustments and
    a ``parent`` product is given, the parent's adjustments are used instead.
    Always returns a PriceAdjustmentCalc (possibly an empty one).
    """
    qty_discounts = product.price_set.exclude(
        expires__isnull=False,
        expires__lt=datetime.date.today()).filter(quantity__lte=qty)

    # Get the price with the quantity closest to the one specified without going over
    best_prices = qty_discounts.order_by('price', '-quantity', 'expires')[:1]

    if best_prices:
        adjustments = best_prices[0].adjustments(product)
    else:
        adjustments = None

    # Fall back to the parent only when the product itself produced no
    # adjustments.  The previous code unconditionally replaced the product's
    # own adjustments whenever a parent was passed, contradicting the
    # documented "first check the product, then the parent" behavior.
    if parent and not adjustments:
        adjustments = get_product_quantity_adjustments(parent, qty=qty)

    if not adjustments:
        adjustments = PriceAdjustmentCalc(None)

    return adjustments
def get_product_quantity_price(product, qty=Decimal('1'), delta=Decimal("0.00"), parent=None):
    """Return the price for ``product`` at quantity ``qty`` as a Decimal.

    The product is checked first; if it has no usable price, the optional
    ``parent`` product is consulted.  ``delta`` is added to the result.
    """
    adjusted = get_product_quantity_adjustments(product, qty=qty, parent=parent)
    return adjusted.final_price() + delta
# -------------------------------------------
# helper objects - not Django model objects
class PriceAdjustmentCalc(object):
    """Accumulates price adjustments for a price and computes their total."""

    def __init__(self, price, product=None):
        self.price = price
        self.base_product = product
        self.adjustments = []

    def __add__(self, adjustment):
        # Accumulate in place and return self, so both "calc + adj" and
        # "calc += adj" extend the same calculator object.
        self.adjustments.append(adjustment)
        return self

    def total_adjustment(self):
        """Sum of all accumulated adjustment amounts (Decimal(0) if none)."""
        return sum((adj.amount for adj in self.adjustments), Decimal(0))

    def _product(self):
        """Lazy product dereference"""
        return self.base_product if self.base_product else self.price.product

    product = property(fget=_product)

    def final_price(self):
        """Base price minus the total adjustment; missing price counts as 0."""
        base = self.price.price if self.price else Decimal(0)
        if base is None:
            base = Decimal(0)
        return base - self.total_adjustment()
class PriceAdjustment(object):
    """A single product pricing adjustment.

    NOTE(review): ``__unicode__`` relies on a gettext ``_`` callable which
    does not appear to be imported in this module -- confirm it is provided
    before ``__unicode__`` is called.
    """

    def __init__(self, key, label=None, amount=None):
        self.key = key
        self.label = key.capitalize() if label is None else label
        self.amount = Decimal(0) if amount is None else amount

    def __unicode__(self):
        return u"%s: %s=%s" % (_('Price Adjustment'), self.label, moneyfmt(self.amount))
| bsd-3-clause | ae95b91ed399be4b8d1f4d971336cef4 | 29.044444 | 94 | 0.620192 | 4.16641 | false | false | false | false |
dokterbob/satchmo | satchmo/apps/payment/modules/sagepay/forms.py | 12 | 4637 | """Sage Pay Form"""
from django import forms
from django.utils.translation import ugettext as _
from livesettings import config_value
from payment.forms import CreditPayShipForm, MONTHS
from payment.modules.sagepay.config import REQUIRES_ISSUE_NUMBER
import datetime
import logging
log = logging.getLogger('payment.sagepay.forms')
class SagePayShipForm(CreditPayShipForm):
    """Adds fields required by Sage Pay to the Credit form.

    Some debit card types (see REQUIRES_ISSUE_NUMBER) carry a "valid from"
    start date and an issue number; the extra fields and clean_* validators
    below handle those.
    """
    card_holder = forms.CharField(max_length=75, required=False)
    month_start = forms.ChoiceField(choices=[(1, '--')]+MONTHS, required=False)
    year_start = forms.ChoiceField(required=False)
    issue_num = forms.CharField(max_length=2, required=False)

    def __init__(self, request, paymentmodule, *args, **kwargs):
        super(SagePayShipForm, self).__init__(request, paymentmodule, *args, **kwargs)
        cf = self.fields['card_holder']
        # Default the card holder to the contact's full name when available.
        if (not cf.initial) or cf.initial == "":
            user = request.user
            if user and user.is_authenticated() and user.contact_set.count() > 0:
                cf.initial = self.tempContact.full_name
        self.requires_issue_number = REQUIRES_ISSUE_NUMBER
        num_years = config_value('PAYMENT', 'CC_NUM_YEARS')
        year_now = datetime.date.today().year
        # Start years run backwards from the current year ("valid from"
        # dates are in the past).
        self.fields['year_start'].choices = [(year, year) for year in range(year_now, year_now-num_years-1, -1)]

    def save(self, request, cart, contact, payment_module, data=None):
        """Save the order and the credit card details."""
        super(SagePayShipForm, self).save(request, cart, contact, payment_module)
        if data is None:
            data = self.cleaned_data
        log.debug("data: %s", data)

        card_holder = data.get('card_holder', '')
        if not card_holder:
            card_holder = contact.full_name
        self.cc.card_holder = card_holder

        month_start = data.get('month_start', None)
        year_start = data.get('year_start', None)
        if month_start:
            try:
                month_start = int(month_start)
            except ValueError:
                log.warn("Could not parse month_start int from %s on order for order: %s", month_start, self.order)
                month_start = 1
                # TODO: raise some error to be caught by processor
        else:
            month_start = None
        if year_start:
            try:
                year_start = int(year_start)
            except ValueError:
                log.warn("Could not parse year_start int from %s on order for order: %s", year_start, self.order)
                year_start = 1
        else:
            year_start = None

        issue_num = data.get('issue_num', "")
        self.cc.start_month = month_start
        self.cc.start_year = year_start
        self.cc.issue_num = issue_num
        self.cc.save()

    def clean_card_holder(self):
        """Fall back to the contact's full name when no card holder is given."""
        ch = self.cleaned_data['card_holder']
        if (not ch) or ch == "":
            ch = self.tempContact.full_name
            self.cleaned_data['card_holder'] = ch
            log.debug('Setting card_holder to contact full name: %s', ch)
        else:
            log.debug('Card holder OK')
        # Django assigns the return value of clean_<field>() back into
        # cleaned_data; the previous version returned None and wiped it.
        return ch

    def clean_month_start(self):
        """Require and range-check the start month for card types that need it."""
        data = self.cleaned_data
        self._maybe_require(data, 'month_start', _('You must provide a starting month when using this type of card.'))
        if data['month_start']:
            try:
                v = int(data['month_start'])
            except ValueError:
                raise forms.ValidationError(_("This must be a number"))
            # The previous test "not v>0 and v<13" only fired for v <= 0
            # because of operator precedence; check the intended 1-12 range.
            if not 1 <= v <= 12:
                raise forms.ValidationError(_("Out of range, must be 1-12"))
        return data['month_start']

    def clean_year_start(self):
        """Require a parseable start year for card types that need it."""
        data = self.cleaned_data
        # The previous code mistakenly validated the 'credit_type' field here
        # and applied a month-style 1-12 range check to a year value.
        self._maybe_require(data, 'year_start', _('You must provide a starting year when using this type of card.'))
        if data['year_start']:
            try:
                int(data['year_start'])
            except ValueError:
                raise forms.ValidationError(_("This must be a number"))
        return data['year_start']

    def clean_issue_num(self):
        """Require an issue number for card types that need it."""
        data = self.cleaned_data
        self._maybe_require(data, 'issue_num', _('You must provide an issue number when using this type of card.'))
        return data['issue_num']

    def _maybe_require(self, data, field, message):
        # Only some card types (listed in REQUIRES_ISSUE_NUMBER) carry start
        # dates and issue numbers; require `field` only for those types.
        if data['credit_type'] in REQUIRES_ISSUE_NUMBER and not (data[field]):
            raise forms.ValidationError(message)
| bsd-3-clause | 2d40710f7227c002b705345f1d13d443 | 40.401786 | 118 | 0.589821 | 3.997414 | false | false | false | false |
django/djangosnippets.org | cab/forms.py | 1 | 2453 | from django import forms
from django.contrib import admin
from .models import VERSIONS, Language, Snippet, SnippetFlag
def validate_non_whitespace_only_string(value):
    """
    Reject empty values and values consisting solely of whitespace.

    The value is stripped before the check, so a string of spaces counts
    as empty.
    """
    stripped = value.strip() if value else ""
    if not stripped:
        raise forms.ValidationError(u"This field is required", code="required")
class SnippetForm(forms.ModelForm):
    """Form for creating and editing a Snippet."""

    # Declared explicitly (overriding the model-derived fields) so that
    # whitespace-only input is rejected by validate_non_whitespace_only_string.
    title = forms.CharField(validators=[validate_non_whitespace_only_string])
    description = forms.CharField(validators=[validate_non_whitespace_only_string], widget=forms.Textarea)
    code = forms.CharField(validators=[validate_non_whitespace_only_string], widget=forms.Textarea)

    class Meta:
        model = Snippet
        # The author and the denormalized counters are managed by the
        # application, never set directly by the submitting user.
        exclude = (
            "author",
            "bookmark_count",
            "rating_score",
        )
class SnippetFlagForm(forms.ModelForm):
    """Form for flagging a snippet; only the flag choice is user-supplied."""

    class Meta:
        model = SnippetFlag
        fields = ("flag",)
class AdvancedSearchForm(forms.Form):
    """Search form that narrows a SearchQuerySet by several optional criteria."""

    q = forms.CharField(required=False, label="Search", widget=forms.TextInput(attrs={"type": "search"}))
    language = forms.ModelChoiceField(queryset=Language.objects.all(), required=False)
    version = forms.MultipleChoiceField(choices=VERSIONS, required=False)
    minimum_pub_date = forms.DateTimeField(widget=admin.widgets.AdminDateWidget, required=False)
    minimum_bookmark_count = forms.IntegerField(required=False)
    minimum_rating_score = forms.IntegerField(required=False)

    def search(self, sqs):
        """Apply each non-empty cleaned field as a filter on ``sqs`` and
        return the narrowed SearchQuerySet."""
        data = self.cleaned_data
        if data["q"]:
            sqs = sqs.filter(search=data["q"])
        if data["language"]:
            sqs = sqs.filter(language__name=data["language"].name)
        if data["version"]:
            sqs = sqs.filter(version__in=data["version"])
        if data["minimum_pub_date"]:
            sqs = sqs.filter(pub_date__gte=data["minimum_pub_date"])
        if data["minimum_bookmark_count"]:
            sqs = sqs.filter(bookmark_count__gte=data["minimum_bookmark_count"])
        if data["minimum_rating_score"]:
            sqs = sqs.filter(rating_score__gte=data["minimum_rating_score"])
        return sqs
| bsd-3-clause | e3d8e3adc241afe2ed834b9fd6d5d98e | 37.328125 | 106 | 0.682837 | 3.9248 | false | false | false | false |
django/djangosnippets.org | djangosnippets/settings/production.py | 1 | 4022 | from __future__ import absolute_import
import os
from urllib import parse
import dj_database_url
import sentry_sdk
from sentry_sdk.integrations.django import DjangoIntegration
from .base import * # noqa: F403
def env_to_bool(input):
    """
    Interpret an environment-variable value as a boolean.

    Strings other than "False"/"false" are treated as True; non-string
    values are returned unchanged.
    """
    if not isinstance(input, str):
        return input
    return input not in ("False", "false")
# Debug mode defaults to off in production; only an explicit truthy
# DEBUG environment variable enables it.
DEBUG = env_to_bool(os.environ.get("DEBUG", False))

# Use the cached template loader.
# APP_DIRS must be removed first because it is mutually exclusive with an
# explicit "loaders" option.
del TEMPLATES[0]["APP_DIRS"]
TEMPLATES[0]["OPTIONS"]["loaders"] = (
    (
        "django.template.loaders.cached.Loader",
        (
            "django.template.loaders.filesystem.Loader",
            "django.template.loaders.app_directories.Loader",
        ),
    ),
)
# Amazon S3 storage configuration, all supplied via environment variables.
AWS_ACCESS_KEY_ID = os.environ.get("AWS_ACCESS_KEY_ID", "")
AWS_SECRET_ACCESS_KEY = os.environ.get("AWS_SECRET_ACCESS_KEY", "")
AWS_STORAGE_BUCKET_NAME = os.environ.get("AWS_STORAGE_BUCKET_NAME", "")
AWS_S3_CUSTOM_DOMAIN = os.environ.get("AWS_S3_CUSTOM_DOMAIN", "")
AWS_PRELOAD_METADATA = True
# AWS_IS_GZIPPED = True
AWS_S3_USE_SSL = True
AWS_QUERYSTRING_AUTH = False
AWS_S3_URL_PROTOCOL = "//:"

# HTTPS / security hardening settings.
# NOTE(review): unlike SESSION_COOKIE_SECURE below, this value is NOT passed
# through env_to_bool, so any non-empty string (including "False") is truthy
# -- confirm this is intended.
SECURE_SSL_REDIRECT = os.environ.get("SECURE_SSL_REDIRECT", False)
SECURE_HSTS_SECONDS = 600
SECURE_HSTS_INCLUDE_SUBDOMAINS = False
SECURE_FRAME_DENY = True
SECURE_CONTENT_TYPE_NOSNIFF = True
SECURE_BROWSER_XSS_FILTER = True
SESSION_COOKIE_SECURE = env_to_bool(os.environ.get("SESSION_COOKIE_SECURE", True))
SESSION_COOKIE_HTTPONLY = True

# The header Heroku uses to indicate SSL:
SECURE_PROXY_SSL_HEADER = ("HTTP_X_FORWARDED_PROTO", "https")
# Serve hashed, compressed static files through WhiteNoise.
STATICFILES_STORAGE = "whitenoise.storage.CompressedManifestStaticFilesStorage"

# Pull the various config info from Heroku.
# Heroku adds some of this automatically if we're using a simple settings.py,
# but we're not and it's just as well -- I like doing this by hand.

# Grab database info
DATABASES = {"default": dj_database_url.config()}

# Make sure urlparse understands custom config schemes.
parse.uses_netloc.append("redis")

# Now do redis and the cache.
redis_url = parse.urlparse(os.environ.get("REDISCLOUD_URL"))
CACHES = {
    "default": {
        "BACKEND": "redis_cache.RedisCache",
        "LOCATION": "%s:%s" % (redis_url.hostname, redis_url.port),
        "OPTIONS": {
            "PASSWORD": redis_url.password,
            "DB": 0,
        },
    }
}

# Use Sentry for debugging if available.
if "SENTRY_DSN" in os.environ:
    sentry_sdk.init(dsn=os.environ.get("SENTRY_DSN"), integrations=[DjangoIntegration()], send_default_pii=True)

# Outgoing mail goes through SendGrid's SMTP relay.
EMAIL_HOST = "smtp.sendgrid.net"
EMAIL_HOST_USER = os.environ.get("SENDGRID_USERNAME")
EMAIL_HOST_PASSWORD = os.environ.get("SENDGRID_PASSWORD")
EMAIL_PORT = 587
EMAIL_USE_TLS = True
LOGGING = {
    "version": 1,
    "disable_existing_loggers": True,
    # By default, everything at WARNING and above is sent to Sentry.
    "root": {
        "level": "WARNING",
        "handlers": ["sentry"],
    },
    "formatters": {
        "verbose": {"format": "%(levelname)s %(asctime)s %(module)s %(process)d %(thread)d %(message)s"},
    },
    "handlers": {
        "sentry": {
            "level": "ERROR",
            "class": "raven.contrib.django.handlers.SentryHandler",
        },
        "console": {"level": "DEBUG", "class": "logging.StreamHandler", "formatter": "verbose"},
    },
    "loggers": {
        # Noisy internals are routed to the console only (propagate=False
        # keeps them away from the Sentry root handler).
        "django.db.backends": {
            "level": "ERROR",
            "handlers": ["console"],
            "propagate": False,
        },
        "raven": {
            "level": "DEBUG",
            "handlers": ["console"],
            "propagate": False,
        },
        "sentry.errors": {
            "level": "DEBUG",
            "handlers": ["console"],
            "propagate": False,
        },
    },
}

# Secrets and third-party API keys.  SECRET_KEY is mandatory and will raise
# KeyError at import time if missing from the environment.
SECRET_KEY = os.environ["SECRET_KEY"]
AKISMET_SECRET_API_KEY = os.environ.get("AKISMET_KEY", "")
RECAPTCHA_PUBLIC_KEY = os.environ.get("RECAPTCHA_PUBLIC_KEY", "")
RECAPTCHA_PRIVATE_KEY = os.environ.get("RECAPTCHA_PRIVATE_KEY", "")
RECAPTCHA_USE_SSL = True
| bsd-3-clause | 17ed465bc7527125066324a22ba4ea30 | 28.144928 | 112 | 0.63277 | 3.351667 | false | false | false | false |
django/djangosnippets.org | cab/templatetags/cab_tags.py | 1 | 1078 | from django import template
from django.contrib.postgres.search import SearchVector
from ..models import Bookmark, Snippet, SnippetFlag
register = template.Library()
@register.filter
def is_bookmarked(snippet, user):
    """
    Template filter: has ``user`` bookmarked ``snippet``?

    {% if snippet|is_bookmarked:request.user %}
        already bookmarked
    {% else %}
        not bookmarked yet
    {% endif %}
    """
    if user.is_authenticated:
        return Bookmark.objects.filter(snippet=snippet, user=user).exists()
    return False
@register.filter
def has_flagged(user, snippet):
    """Template filter: has ``user`` flagged ``snippet``?"""
    if user.is_authenticated:
        return SnippetFlag.objects.filter(snippet=snippet, user=user).exists()
    return False
@register.filter
def more_like_this(snippet, limit=None):
    """
    Return other snippets in the same language as ``snippet``, optionally
    capped at ``limit`` results.  Returns an empty list when the lookup
    fails with an AttributeError (e.g. ``snippet`` lacks the expected
    attributes).
    """
    try:
        candidates = Snippet.objects.annotate(
            search=SearchVector(
                "language__name",
            )
        )
        candidates = candidates.filter(language__name=snippet.language).exclude(pk=snippet.pk)
        if limit is not None:
            candidates = candidates[:limit]
    except AttributeError:
        return []
    return candidates
| bsd-3-clause | 3358f89f20212cc4326265db310bb03a | 24.069767 | 80 | 0.641929 | 3.85 | false | false | false | false |
scrapy/scrapy | scrapy/commands/startproject.py | 1 | 4041 | import re
import os
import string
from importlib.util import find_spec
from pathlib import Path
from shutil import ignore_patterns, move, copy2, copystat
from stat import S_IWUSR as OWNER_WRITE_PERMISSION
import scrapy
from scrapy.commands import ScrapyCommand
from scrapy.utils.template import render_templatefile, string_camelcase
from scrapy.exceptions import UsageError
# Template files rendered into a new project.  The ${project_name} path
# component is substituted with the actual project name before rendering.
TEMPLATES_TO_RENDER = (
    ('scrapy.cfg',),
    ('${project_name}', 'settings.py.tmpl'),
    ('${project_name}', 'items.py.tmpl'),
    ('${project_name}', 'pipelines.py.tmpl'),
    ('${project_name}', 'middlewares.py.tmpl'),
)

# Files never copied from the template tree.
IGNORE = ignore_patterns('*.pyc', '__pycache__', '.svn')
def _make_writable(path):
current_permissions = os.stat(path).st_mode
os.chmod(path, current_permissions | OWNER_WRITE_PERMISSION)
class Command(ScrapyCommand):
    """``scrapy startproject``: create a new Scrapy project from templates."""

    requires_project = False
    default_settings = {'LOG_ENABLED': False,
                        'SPIDER_LOADER_WARN_ONLY': True}

    def syntax(self):
        return "<project_name> [project_dir]"

    def short_desc(self):
        return "Create new project"

    def _is_valid_name(self, project_name):
        """Return True if ``project_name`` is a valid Python identifier that
        does not shadow an importable module; print the reason otherwise."""
        def _module_exists(module_name):
            spec = find_spec(module_name)
            return spec is not None and spec.loader is not None

        if not re.search(r'^[_a-zA-Z]\w*$', project_name):
            print('Error: Project names must begin with a letter and contain'
                  ' only\nletters, numbers and underscores')
        elif _module_exists(project_name):
            print(f'Error: Module {project_name!r} already exists')
        else:
            return True
        return False

    def _copytree(self, src: Path, dst: Path):
        """
        Since the original function always creates the directory, to resolve
        the issue a new function had to be created. It's a simple copy and
        was reduced for this case.

        More info at:
        https://github.com/scrapy/scrapy/pull/2005
        """
        ignore = IGNORE
        names = [x.name for x in src.iterdir()]
        ignored_names = ignore(src, names)

        if not dst.exists():
            dst.mkdir(parents=True)

        for name in names:
            if name in ignored_names:
                continue

            srcname = src / name
            dstname = dst / name
            if srcname.is_dir():
                self._copytree(srcname, dstname)
            else:
                # Copied files may come from a read-only install; make the
                # copies writable so the user can edit the new project.
                copy2(srcname, dstname)
                _make_writable(dstname)

        copystat(src, dst)
        _make_writable(dst)

    def run(self, args, opts):
        """Validate arguments, copy the template tree and render the
        ${project_name} templates into the new project directory."""
        if len(args) not in (1, 2):
            raise UsageError()

        project_name = args[0]

        if len(args) == 2:
            project_dir = Path(args[1])
        else:
            project_dir = Path(args[0])

        if (project_dir / 'scrapy.cfg').exists():
            self.exitcode = 1
            print(f'Error: scrapy.cfg already exists in {project_dir.resolve()}')
            return

        if not self._is_valid_name(project_name):
            self.exitcode = 1
            return

        self._copytree(Path(self.templates_dir), project_dir.resolve())
        # The template tree ships the package under the generic name
        # 'module'; rename it to the chosen project name.
        move(project_dir / 'module', project_dir / project_name)
        for paths in TEMPLATES_TO_RENDER:
            tplfile = Path(project_dir, *(string.Template(s).substitute(project_name=project_name) for s in paths))
            render_templatefile(tplfile, project_name=project_name, ProjectName=string_camelcase(project_name))
        print(f"New Scrapy project '{project_name}', using template directory "
              f"'{self.templates_dir}', created in:")
        print(f"    {project_dir.resolve()}\n")
        print("You can start your first spider with:")
        print(f"    cd {project_dir}")
        print("    scrapy genspider example example.com")

    @property
    def templates_dir(self) -> str:
        # TEMPLATES_DIR setting overrides the templates bundled with scrapy.
        return str(Path(
            self.settings['TEMPLATES_DIR'] or Path(scrapy.__path__[0], 'templates'),
            'project'
        ))
| bsd-3-clause | 8fac05b01fb7be6c3ca3c68a78ce4a85 | 31.328 | 115 | 0.594407 | 3.930934 | false | false | false | false |
scrapy/scrapy | scrapy/http/request/rpc.py | 4 | 1087 | """
This module implements the XmlRpcRequest class which is a more convenient class
(that Request) to generate xml-rpc requests.
See documentation in docs/topics/request-response.rst
"""
import xmlrpc.client as xmlrpclib
from typing import Optional
from scrapy.http.request import Request
from scrapy.utils.python import get_func_args
DUMPS_ARGS = get_func_args(xmlrpclib.dumps)
class XmlRpcRequest(Request):
    """Request subclass that serializes ``params`` into an XML-RPC POST body."""

    def __init__(self, *args, encoding: Optional[str] = None, **kwargs):
        if 'body' not in kwargs and 'params' in kwargs:
            # Split out the kwargs that xmlrpclib.dumps() understands and
            # serialize them into the request body.
            dump_kwargs = {key: kwargs.pop(key) for key in DUMPS_ARGS if key in kwargs}
            kwargs['body'] = xmlrpclib.dumps(**dump_kwargs)

        # spec defines that requests must use POST method
        kwargs.setdefault('method', 'POST')

        # xmlrpc query multiples times over the same url
        kwargs.setdefault('dont_filter', True)

        # restore encoding
        if encoding is not None:
            kwargs['encoding'] = encoding

        super().__init__(*args, **kwargs)
        self.headers.setdefault('Content-Type', 'text/xml')
| bsd-3-clause | 6be47b87c21042a003c0aa0cda5bc0e5 | 30.057143 | 79 | 0.670653 | 4.05597 | false | false | false | false |
scrapy/scrapy | scrapy/mail.py | 4 | 5156 | """
Mail sending helpers
See documentation in docs/topics/email.rst
"""
import logging
from email import encoders as Encoders
from email.mime.base import MIMEBase
from email.mime.multipart import MIMEMultipart
from email.mime.nonmultipart import MIMENonMultipart
from email.mime.text import MIMEText
from email.utils import formatdate
from io import BytesIO
from twisted.internet import defer, ssl
from scrapy.utils.misc import arg_to_iter
from scrapy.utils.python import to_bytes
logger = logging.getLogger(__name__)
# Separator used when joining recipient addresses into a mail header.
# Defined in the email.utils module, but undocumented:
# https://github.com/python/cpython/blob/v3.9.0/Lib/email/utils.py#L42
COMMASPACE = ", "
def _to_bytes_or_none(text):
    """Encode ``text`` to bytes, passing ``None`` through unchanged."""
    return None if text is None else to_bytes(text)
class MailSender:
    """Send email over SMTP using Twisted, configured from Scrapy settings.

    With ``debug=True`` messages are only logged, never actually sent.
    """

    def __init__(
        self, smtphost='localhost', mailfrom='scrapy@localhost', smtpuser=None,
        smtppass=None, smtpport=25, smtptls=False, smtpssl=False, debug=False
    ):
        self.smtphost = smtphost
        self.smtpport = smtpport
        # Credentials are converted to bytes for twisted's ESMTP auth.
        self.smtpuser = _to_bytes_or_none(smtpuser)
        self.smtppass = _to_bytes_or_none(smtppass)
        self.smtptls = smtptls
        self.smtpssl = smtpssl
        self.mailfrom = mailfrom
        self.debug = debug

    @classmethod
    def from_settings(cls, settings):
        """Build a MailSender from the MAIL_* Scrapy settings."""
        return cls(
            smtphost=settings['MAIL_HOST'],
            mailfrom=settings['MAIL_FROM'],
            smtpuser=settings['MAIL_USER'],
            smtppass=settings['MAIL_PASS'],
            smtpport=settings.getint('MAIL_PORT'),
            smtptls=settings.getbool('MAIL_TLS'),
            smtpssl=settings.getbool('MAIL_SSL'),
        )

    def send(self, to, subject, body, cc=None, attachs=(), mimetype='text/plain', charset=None, _callback=None):
        """Compose the MIME message and send it asynchronously.

        Returns a Deferred (or None in debug mode).  ``_callback``, if given,
        is invoked with the composed message before sending.
        """
        from twisted.internet import reactor
        if attachs:
            msg = MIMEMultipart()
        else:
            msg = MIMENonMultipart(*mimetype.split('/', 1))

        # `to` and `cc` may be given as a single address or an iterable.
        to = list(arg_to_iter(to))
        cc = list(arg_to_iter(cc))

        msg['From'] = self.mailfrom
        msg['To'] = COMMASPACE.join(to)
        msg['Date'] = formatdate(localtime=True)
        msg['Subject'] = subject
        rcpts = to[:]
        if cc:
            rcpts.extend(cc)
            msg['Cc'] = COMMASPACE.join(cc)

        if charset:
            msg.set_charset(charset)

        if attachs:
            msg.attach(MIMEText(body, 'plain', charset or 'us-ascii'))
            # NOTE(review): the loop variable `mimetype` shadows the
            # function parameter of the same name.
            for attach_name, mimetype, f in attachs:
                part = MIMEBase(*mimetype.split('/'))
                part.set_payload(f.read())
                Encoders.encode_base64(part)
                part.add_header('Content-Disposition', 'attachment', filename=attach_name)
                msg.attach(part)
        else:
            msg.set_payload(body)

        if _callback:
            _callback(to=to, subject=subject, body=body, cc=cc, attach=attachs, msg=msg)

        # Debug mode: log the message instead of sending it.
        if self.debug:
            logger.debug('Debug mail sent OK: To=%(mailto)s Cc=%(mailcc)s '
                         'Subject="%(mailsubject)s" Attachs=%(mailattachs)d',
                         {'mailto': to, 'mailcc': cc, 'mailsubject': subject,
                          'mailattachs': len(attachs)})
            return

        dfd = self._sendmail(rcpts, msg.as_string().encode(charset or 'utf-8'))
        dfd.addCallbacks(
            callback=self._sent_ok,
            errback=self._sent_failed,
            callbackArgs=[to, cc, subject, len(attachs)],
            errbackArgs=[to, cc, subject, len(attachs)],
        )
        # Keep the reactor alive until the send completes.
        reactor.addSystemEventTrigger('before', 'shutdown', lambda: dfd)
        return dfd

    def _sent_ok(self, result, to, cc, subject, nattachs):
        """Success callback: log the delivered mail."""
        logger.info('Mail sent OK: To=%(mailto)s Cc=%(mailcc)s '
                    'Subject="%(mailsubject)s" Attachs=%(mailattachs)d',
                    {'mailto': to, 'mailcc': cc, 'mailsubject': subject,
                     'mailattachs': nattachs})

    def _sent_failed(self, failure, to, cc, subject, nattachs):
        """Error callback: log the failure reason."""
        errstr = str(failure.value)
        logger.error('Unable to send mail: To=%(mailto)s Cc=%(mailcc)s '
                     'Subject="%(mailsubject)s" Attachs=%(mailattachs)d'
                     '- %(mailerr)s',
                     {'mailto': to, 'mailcc': cc, 'mailsubject': subject,
                      'mailattachs': nattachs, 'mailerr': errstr})

    def _sendmail(self, to_addrs, msg):
        """Deliver ``msg`` (bytes) to ``to_addrs`` via ESMTP; returns a Deferred."""
        # Imported locally to defer the twisted.mail/reactor dependency until
        # a message is actually sent.  (The old comment claiming twisted.mail
        # was unavailable on Python 3 is outdated.)
        from twisted.internet import reactor
        from twisted.mail.smtp import ESMTPSenderFactory
        msg = BytesIO(msg)
        d = defer.Deferred()

        factory = ESMTPSenderFactory(
            self.smtpuser, self.smtppass, self.mailfrom, to_addrs, msg, d,
            heloFallback=True, requireAuthentication=False, requireTransportSecurity=self.smtptls,
        )
        factory.noisy = False

        if self.smtpssl:
            reactor.connectSSL(self.smtphost, self.smtpport, factory, ssl.ClientContextFactory())
        else:
            reactor.connectTCP(self.smtphost, self.smtpport, factory)

        return d
| bsd-3-clause | 356ee91640fecb855e19a66b39ccdcdc | 34.558621 | 112 | 0.594841 | 3.730825 | false | false | false | false |
scrapy/scrapy | scrapy/utils/signal.py | 4 | 3107 | """Helper functions for working with signals"""
import collections.abc
import logging
from twisted.internet.defer import DeferredList, Deferred
from twisted.python.failure import Failure
from pydispatch.dispatcher import Anonymous, Any, disconnect, getAllReceivers, liveReceivers
from pydispatch.robustapply import robustApply
from scrapy.exceptions import StopDownload
from scrapy.utils.defer import maybeDeferred_coro
from scrapy.utils.log import failure_to_exc_info
logger = logging.getLogger(__name__)
def send_catch_log(signal=Any, sender=Anonymous, *arguments, **named):
    """Like pydispatcher.robust.sendRobust but it also logs errors and returns
    Failures instead of exceptions.
    """
    # Normalize dont_log into a tuple of exception types; StopDownload is
    # always treated as a non-loggable, expected exception.
    dont_log = named.pop('dont_log', ())
    if isinstance(dont_log, collections.abc.Sequence):
        dont_log = tuple(dont_log)
    else:
        dont_log = (dont_log,)
    dont_log += (StopDownload,)
    spider = named.get('spider', None)
    results = []
    for receiver in liveReceivers(getAllReceivers(sender, signal)):
        try:
            response = robustApply(receiver, signal=signal, sender=sender, *arguments, **named)
            if isinstance(response, Deferred):
                logger.error("Cannot return deferreds from signal handler: %(receiver)s",
                             {'receiver': receiver}, extra={'spider': spider})
        except dont_log:
            outcome = Failure()
        except Exception:
            outcome = Failure()
            logger.error("Error caught on signal handler: %(receiver)s",
                         {'receiver': receiver},
                         exc_info=True, extra={'spider': spider})
        else:
            outcome = response
        results.append((receiver, outcome))
    return results
def send_catch_log_deferred(signal=Any, sender=Anonymous, *arguments, **named):
    """Like send_catch_log but supports returning deferreds on signal handlers.

    Returns a deferred that gets fired once all signal handlers deferreds were
    fired.
    """
    def logerror(failure, recv):
        # Log unless the exception type was explicitly excluded via dont_log.
        if dont_log is None or not isinstance(failure.value, dont_log):
            logger.error("Error caught on signal handler: %(receiver)s",
                         {'receiver': recv},
                         exc_info=failure_to_exc_info(failure),
                         extra={'spider': spider})
        return failure

    dont_log = named.pop('dont_log', None)
    spider = named.get('spider', None)
    dfds = []
    for receiver in liveReceivers(getAllReceivers(sender, signal)):
        d = maybeDeferred_coro(robustApply, receiver, signal=signal, sender=sender, *arguments, **named)
        d.addErrback(logerror, receiver)
        # Bind `receiver` as a default argument: the previous lambda closed
        # over the loop variable, so any deferred firing after the loop
        # finished would be paired with the *last* receiver instead of its
        # own.
        d.addBoth(lambda result, receiver=receiver: (receiver, result))
        dfds.append(d)
    d = DeferredList(dfds)
    # Strip the DeferredList success flags, keeping the (receiver, result)
    # pairs only.
    d.addCallback(lambda out: [x[1] for x in out])
    return d
def disconnect_all(signal=Any, sender=Any):
    """Disconnect every receiver attached to ``signal``/``sender``.

    Mainly useful for cleaning up after running tests.
    """
    for receiver in liveReceivers(getAllReceivers(sender, signal)):
        disconnect(receiver, signal=signal, sender=sender)
| bsd-3-clause | 00930e85092403c72b88ce8c303d46dc | 38.833333 | 104 | 0.655938 | 4.273728 | false | false | false | false |
scrapy/scrapy | scrapy/commands/settings.py | 3 | 1803 | import json
from scrapy.commands import ScrapyCommand
from scrapy.settings import BaseSettings
class Command(ScrapyCommand):
    """``scrapy settings``: print resolved setting values."""

    requires_project = False
    default_settings = {'LOG_ENABLED': False,
                        'SPIDER_LOADER_WARN_ONLY': True}

    def syntax(self):
        return "[options]"

    def short_desc(self):
        return "Get settings values"

    def add_options(self, parser):
        ScrapyCommand.add_options(self, parser)
        # One option per typed getter on the settings object.
        option_specs = (
            ("--get", "get", "print raw setting value"),
            ("--getbool", "getbool", "print setting value, interpreted as a boolean"),
            ("--getint", "getint", "print setting value, interpreted as an integer"),
            ("--getfloat", "getfloat", "print setting value, interpreted as a float"),
            ("--getlist", "getlist", "print setting value, interpreted as a list"),
        )
        for flag, dest, help_text in option_specs:
            parser.add_argument(flag, dest=dest, metavar="SETTING", help=help_text)

    def run(self, args, opts):
        """Print the requested setting using the matching typed getter."""
        settings = self.crawler_process.settings
        if opts.get:
            value = settings.get(opts.get)
            # Nested settings objects are rendered as JSON for readability.
            if isinstance(value, BaseSettings):
                print(json.dumps(value.copy_to_dict()))
            else:
                print(value)
        elif opts.getbool:
            print(settings.getbool(opts.getbool))
        elif opts.getint:
            print(settings.getint(opts.getint))
        elif opts.getfloat:
            print(settings.getfloat(opts.getfloat))
        elif opts.getlist:
            print(settings.getlist(opts.getlist))
| bsd-3-clause | e11e39f95819e947902a363f8b4027ea | 37.361702 | 82 | 0.583472 | 4.419118 | false | false | false | false |
scrapy/scrapy | scrapy/downloadermiddlewares/ajaxcrawl.py | 7 | 3280 | import re
import logging
from w3lib import html
from scrapy.exceptions import NotConfigured
from scrapy.http import HtmlResponse
logger = logging.getLogger(__name__)
class AjaxCrawlMiddleware:
    """
    Handle 'AJAX crawlable' pages marked as crawlable via meta tag.
    For more info see https://developers.google.com/webmasters/ajax-crawling/docs/getting-started.
    """

    def __init__(self, settings):
        if not settings.getbool('AJAXCRAWL_ENABLED'):
            raise NotConfigured

        # XXX: Google parses at least first 100k bytes; scrapy's redirect
        # middleware parses first 4k. 4k turns out to be insufficient
        # for this middleware, and parsing 100k could be slow.
        # We use something in between (32K) by default.
        self.lookup_bytes = settings.getint('AJAXCRAWL_MAXSIZE', 32768)

    @classmethod
    def from_crawler(cls, crawler):
        return cls(crawler.settings)

    def process_response(self, request, response, spider):
        """Re-issue GET requests for AJAX-crawlable pages with a '#!' URL."""
        not_applicable = (
            not isinstance(response, HtmlResponse)
            or response.status != 200
            # other HTTP methods are either not safe or don't have a body
            or request.method != 'GET'
            # prevent loops
            or 'ajax_crawlable' in request.meta
            or not self._has_ajax_crawlable_variant(response)
        )
        if not_applicable:
            return response

        # scrapy already handles #! links properly
        ajax_crawl_request = request.replace(url=request.url + '#!')
        logger.debug("Downloading AJAX crawlable %(ajax_crawl_request)s instead of %(request)s",
                     {'ajax_crawl_request': ajax_crawl_request, 'request': request},
                     extra={'spider': spider})

        ajax_crawl_request.meta['ajax_crawlable'] = True
        return ajax_crawl_request

    def _has_ajax_crawlable_variant(self, response):
        """
        Return True if a page without hash fragment could be "AJAX crawlable"
        according to https://developers.google.com/webmasters/ajax-crawling/docs/getting-started.
        """
        return _has_ajaxcrawlable_meta(response.text[:self.lookup_bytes])
# XXX: move it to w3lib?
# Matches <meta name="fragment" content="!"> with either quote style and an
# optional self-closing slash.
_ajax_crawlable_re = re.compile(r'<meta\s+name=["\']fragment["\']\s+content=["\']!["\']/?>')
def _has_ajaxcrawlable_meta(text):
    """
    >>> _has_ajaxcrawlable_meta('<html><head><meta name="fragment" content="!"/></head><body></body></html>')
    True
    >>> _has_ajaxcrawlable_meta("<html><head><meta name='fragment' content='!'></head></html>")
    True
    >>> _has_ajaxcrawlable_meta('<html><head><!--<meta name="fragment" content="!"/>--></head><body></body></html>')
    False
    >>> _has_ajaxcrawlable_meta('<html></html>')
    False
    """
    # Stripping scripts and comments is slow (about 20x slower than
    # just checking if a string is in text); these substring checks are a
    # quick fail-fast path that should work for most pages.
    if 'fragment' not in text or 'content' not in text:
        return False

    cleaned = html.remove_tags_with_content(text, ('script', 'noscript'))
    cleaned = html.replace_entities(cleaned)
    cleaned = html.remove_comments(cleaned)
    return _ajax_crawlable_re.search(cleaned) is not None
| bsd-3-clause | d05ec132bc62b8b25468a0fdbafeb89e | 34.268817 | 117 | 0.641768 | 3.78316 | false | false | false | false |
turbulenz/gyp | test/compiler-override/gyptest-compiler-env-toolchain.py | 94 | 2877 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies that the user can override the compiler and linker using
CC/CXX/NM/READELF environment variables.
"""
import TestGyp
import os
import copy
import sys
# Absolute directory of this test; used to locate the my_*.py helper tools.
here = os.path.dirname(os.path.abspath(__file__))

if sys.platform == 'win32':
  # cross compiling not supported by ninja on windows
  # and make not supported on windows at all.
  sys.exit(0)

# Clear any existing compiler related env vars.
for key in ['CC', 'CXX', 'LINK', 'CC_host', 'CXX_host', 'LINK_host',
            'NM_target', 'READELF_target']:
  if key in os.environ:
    del os.environ[key]
def CheckCompiler(test, gypfile, check_for, run_gyp):
  """Build |gypfile| and verify every line of |check_for| is in its stdout.

  When |run_gyp| is true the project files are regenerated first; passing
  False proves that settings were baked into previously generated projects.
  """
  if run_gyp:
    test.run_gyp(gypfile)
  test.build(gypfile)
  build_output = test.stdout()
  test.must_contain_all_lines(build_output, check_for)
test = TestGyp.TestGyp(formats=['ninja'])
# Must set the test format to something with a flavor (the part after the '-')
# in order to test the desired behavior. Since we want to run a non-host
# toolchain, we have to set the flavor to something that the ninja generator
# doesn't know about, so it doesn't default to the host-specific tools (e.g.,
# 'otool' on mac to generate the .TOC).
#
# Note that we can't just pass format=['ninja-some_toolchain'] to the
# constructor above, because then this test wouldn't be recognized as a ninja
# format test.
test.formats = ['ninja-my_flavor' if f == 'ninja' else f for f in test.formats]
def TestTargetOverideSharedLib():
  """Verify CC/CXX/NM/READELF env vars override the target toolchain tools."""
  # The std output from nm and readelf is redirected to files, so we can't
  # expect their output to appear. Instead, check for the files they create to
  # see if they actually ran.
  expected = ['my_cc.py', 'my_cxx.py', 'FOO']
  # Check that CC, CXX, NM, READELF, set target compiler
  env = {'CC': 'python %s/my_cc.py FOO' % here,
         'CXX': 'python %s/my_cxx.py FOO' % here,
         'NM': 'python %s/my_nm.py' % here,
         'READELF': 'python %s/my_readelf.py' % here}
  with TestGyp.LocalEnv(env):
    CheckCompiler(test, 'compiler-shared-lib.gyp', expected, True)
    test.must_contain(test.built_file_path('RAN_MY_NM'), 'RAN_MY_NM')
    test.must_contain(test.built_file_path('RAN_MY_READELF'), 'RAN_MY_READELF')
    test.unlink(test.built_file_path('RAN_MY_NM'))
    test.unlink(test.built_file_path('RAN_MY_READELF'))
  # Run the same tests once the environ has been restored. The generated
  # projects should have embedded all the settings in the project files so the
  # results should be the same.
  CheckCompiler(test, 'compiler-shared-lib.gyp', expected, False)
  test.must_contain(test.built_file_path('RAN_MY_NM'), 'RAN_MY_NM')
  test.must_contain(test.built_file_path('RAN_MY_READELF'), 'RAN_MY_READELF')
TestTargetOverideSharedLib()
test.pass_test()
| bsd-3-clause | bcfb2fa97b0b6ad50f65a2d00d658acf | 35.884615 | 79 | 0.698297 | 3.23622 | false | true | false | false |
djc/couchdb-python | setup.py | 1 | 2269 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2007-2009 Christopher Lenz
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution.
import sys
# Prefer setuptools (provides console_scripts entry points and test_suite);
# fall back to bare distutils when it is unavailable.
try:
    from setuptools import setup
    has_setuptools = True
except ImportError:
    from distutils.core import setup
    has_setuptools = False
# Build setuptools-specific options (if installed).
if not has_setuptools:
    print("WARNING: setuptools/distribute not available. Console scripts will not be installed.")
    setuptools_options = {}
else:
    setuptools_options = {
        'entry_points': {
            'console_scripts': [
                'couchpy = couchdb.view:main',
                'couchdb-dump = couchdb.tools.dump:main',
                'couchdb-load = couchdb.tools.load:main',
                'couchdb-replicate = couchdb.tools.replicate:main',
                'couchdb-load-design-doc = couchdb.loader:main',
            ],
        },
        'install_requires': [],
        'test_suite': 'couchdb.tests.__main__.suite',
        'zip_safe': True,
    }
# Package metadata; setuptools-only options are splatted in at the end so the
# same call works under plain distutils.
setup(
    name = 'CouchDB',
    version = '1.2.1',
    description = 'Python library for working with CouchDB',
    long_description = \
"""This is a Python library for CouchDB. It provides a convenient high level
interface for the CouchDB server.""",
    author = 'Christopher Lenz',
    author_email = 'cmlenz@gmx.de',
    maintainer = 'Dirkjan Ochtman',
    maintainer_email = 'dirkjan@ochtman.nl',
    license = 'BSD',
    url = 'https://github.com/djc/couchdb-python/',
    classifiers = [
        'Development Status :: 6 - Mature',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: BSD License',
        'Operating System :: OS Independent',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Topic :: Database :: Front-Ends',
        'Topic :: Software Development :: Libraries :: Python Modules',
    ],
    packages = ['couchdb', 'couchdb.tools', 'couchdb.tests'],
    **setuptools_options
)
| bsd-3-clause | fe60f894f1c943153b2b4f69c95e2767 | 32.367647 | 97 | 0.615249 | 3.852292 | false | false | false | false |
turbulenz/gyp | test/prune_targets/gyptest-prune-targets.py | 47 | 2285 | #!/usr/bin/env python
# Copyright (c) 2013 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies --root-target removes the unnecessary targets.
"""
import TestGyp
test = TestGyp.TestGyp()
# The xcode-ninja generator has its own logic for which targets to include
if test.format == 'xcode-ninja':
  test.skip_test()
# Exit status each generator's build tool returns when asked to build a
# target that was pruned out of the generated project.
build_error_code = {
  'cmake': 1,
  'make': 2,
  'msvs': 1,
  'ninja': 1,
  'xcode': 65,
}[test.format]
# By default, everything will be included.
test.run_gyp('test1.gyp')
test.build('test2.gyp', 'lib1')
test.build('test2.gyp', 'lib2')
test.build('test2.gyp', 'lib3')
test.build('test2.gyp', 'lib_indirect')
test.build('test1.gyp', 'program1')
test.build('test1.gyp', 'program2')
test.build('test1.gyp', 'program3')
# With deep dependencies of program1 only.
# A build invocation with status=build_error_code asserts that the target was
# pruned from the generated project (the build is expected to fail).
test.run_gyp('test1.gyp', '--root-target=program1')
test.build('test2.gyp', 'lib1')
test.build('test2.gyp', 'lib2', status=build_error_code, stderr=None)
test.build('test2.gyp', 'lib3', status=build_error_code, stderr=None)
test.build('test2.gyp', 'lib_indirect')
test.build('test1.gyp', 'program1')
test.build('test1.gyp', 'program2', status=build_error_code, stderr=None)
test.build('test1.gyp', 'program3', status=build_error_code, stderr=None)
# With deep dependencies of program2 only.
test.run_gyp('test1.gyp', '--root-target=program2')
test.build('test2.gyp', 'lib1', status=build_error_code, stderr=None)
test.build('test2.gyp', 'lib2')
test.build('test2.gyp', 'lib3', status=build_error_code, stderr=None)
test.build('test2.gyp', 'lib_indirect')
test.build('test1.gyp', 'program1', status=build_error_code, stderr=None)
test.build('test1.gyp', 'program2')
test.build('test1.gyp', 'program3', status=build_error_code, stderr=None)
# With deep dependencies of program1 and program2.
test.run_gyp('test1.gyp', '--root-target=program1', '--root-target=program2')
test.build('test2.gyp', 'lib1')
test.build('test2.gyp', 'lib2')
test.build('test2.gyp', 'lib3', status=build_error_code, stderr=None)
test.build('test2.gyp', 'lib_indirect')
test.build('test1.gyp', 'program1')
test.build('test1.gyp', 'program2')
test.build('test1.gyp', 'program3', status=build_error_code, stderr=None)
test.pass_test()
| bsd-3-clause | 36101a7a1308cffa6f2f76ab9f210beb | 33.621212 | 77 | 0.708096 | 2.831475 | false | true | false | false |
djc/couchdb-python | couchdb/view.py | 1 | 7322 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2007-2008 Christopher Lenz
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution.
"""Implementation of a view server for functions written in Python."""
from codecs import BOM_UTF8
import logging
import os
import sys
import traceback
from types import FunctionType
from couchdb import json, util
__all__ = ['main', 'run']
__docformat__ = 'restructuredtext en'
log = logging.getLogger('couchdb.view')
def run(input=sys.stdin, output=None):
    r"""CouchDB view function handler implementation for Python.

    Implements the CouchDB query-server line protocol: reads one
    JSON-encoded command per line from *input*, dispatches it to one of the
    handlers ('reset', 'add_fun', 'map_doc', 'reduce', 'rereduce') and
    writes the JSON-encoded reply followed by a newline to *output*.
    Returns a process exit code (0 on clean shutdown, 1 on error).

    :param input: the readable file-like object to read input from
    :param output: the writable file-like object to write output to
    """
    # Compiled map functions registered via add_fun; cleared by reset.
    functions = []
    if output is None:
        # On Python 3 write raw bytes to the underlying buffer.
        output = sys.stdout if sys.version_info[0] < 3 else sys.stdout.buffer
    # Encode obj as JSON and emit it as one UTF-8 protocol line.
    def _writejson(obj):
        obj = json.encode(obj)
        if isinstance(obj, util.utype):
            obj = obj.encode('utf-8')
        output.write(obj)
        output.write(b'\n')
        output.flush()
    # Send a 'log' protocol message back to the CouchDB server.
    def _log(message):
        if not isinstance(message, util.strbase):
            message = json.encode(message)
        _writejson(['log', message])
    # 'reset' command: forget all registered map functions.
    def reset(config=None):
        del functions[:]
        return True
    # 'add_fun' command: compile the source string and register the single
    # function it must define; returns an error reply on bad input.
    def add_fun(string):
        # BOM forces the UTF-8 codec regardless of any coding declarations.
        string = BOM_UTF8 + string.encode('utf-8')
        globals_ = {}
        try:
            util.pyexec(string, {'log': _log}, globals_)
        except Exception as e:
            return ['error',
                    'map_compilation_error',
                    e.args[0]
            ]
        err = ['error',
               'map_compilation_error',
               'string must eval to a function '
               '(ex: "def(doc): return 1")'
        ]
        # The source must define exactly one plain function.
        if len(globals_) != 1:
            return err
        function = list(globals_.values())[0]
        if type(function) is not FunctionType:
            return err
        functions.append(function)
        return True
    # 'map_doc' command: run every registered map function over doc,
    # collecting [key, value] pairs; a failing function yields [] and the
    # traceback is logged instead of aborting the whole batch.
    def map_doc(doc):
        results = []
        for function in functions:
            try:
                results.append([[key, value] for key, value in function(doc)])
            except Exception as e:
                log.error('runtime error in map function: %s', e,
                          exc_info=True)
                results.append([])
                _log(traceback.format_exc())
        return results
    # 'reduce' command: compile the reduce source from cmd[0][0] and apply
    # it to the [key, value] pairs in cmd[1].  kwargs['rereduce'] selects
    # re-reduction (values only, keys is None).
    def reduce(*cmd, **kwargs):
        code = BOM_UTF8 + cmd[0][0].encode('utf-8')
        args = cmd[1]
        globals_ = {}
        try:
            util.pyexec(code, {'log': _log}, globals_)
        except Exception as e:
            log.error('runtime error in reduce function: %s', e,
                      exc_info=True)
            return ['error',
                    'reduce_compilation_error',
                    e.args[0]
            ]
        err = ['error',
               'reduce_compilation_error',
               'string must eval to a function '
               '(ex: "def(keys, values): return 1")'
        ]
        if len(globals_) != 1:
            return err
        function = list(globals_.values())[0]
        if type(function) is not FunctionType:
            return err
        rereduce = kwargs.get('rereduce', False)
        results = []
        if rereduce:
            keys = None
            vals = args
        else:
            if args:
                keys, vals = zip(*args)
            else:
                keys, vals = [], []
        # Three-argument reduce functions also receive the rereduce flag.
        if util.funcode(function).co_argcount == 3:
            results = function(keys, vals, rereduce)
        else:
            results = function(keys, vals)
        return [True, [results]]
    # 'rereduce' command: same as reduce with the rereduce flag set.
    def rereduce(*cmd):
        # Note: weird kwargs is for Python 2.5 compat
        return reduce(*cmd, **{'rereduce': True})
    handlers = {'reset': reset, 'add_fun': add_fun, 'map_doc': map_doc,
                'reduce': reduce, 'rereduce': rereduce}
    # Main protocol loop: one JSON command per line until EOF.
    try:
        while True:
            line = input.readline()
            if not line:
                break
            try:
                cmd = json.decode(line)
                log.debug('Processing %r', cmd)
            except ValueError as e:
                log.error('Error: %s', e, exc_info=True)
                return 1
            else:
                retval = handlers[cmd[0]](*cmd[1:])
                log.debug('Returning %r', retval)
                _writejson(retval)
    except KeyboardInterrupt:
        return 0
    except Exception as e:
        log.error('Error: %s', e, exc_info=True)
        return 1
_VERSION = """%(name)s - CouchDB Python %(version)s
Copyright (C) 2007 Christopher Lenz <cmlenz@gmx.de>.
"""
_HELP = """Usage: %(name)s [OPTION]
The %(name)s command runs the CouchDB Python view server.
The exit status is 0 for success or 1 for failure.
Options:
--version display version information and exit
-h, --help display a short help message and exit
--json-module=<name> set the JSON module to use ('simplejson', 'cjson',
or 'json' are supported)
--log-file=<file> name of the file to write log messages to, or '-' to
enable logging to the standard error stream
--debug enable debug logging; requires --log-file to be
specified
Report bugs via the web at <https://github.com/djc/couchdb-python/issues>.
"""
def main():
    """Command-line entry point for running the view server.

    Parses command-line options, prints version/help and exits when
    requested, otherwise configures logging/JSON handling and exits with
    the return code of :func:`run`.
    """
    import getopt
    from couchdb import __version__ as VERSION
    try:
        option_list, argument_list = getopt.gnu_getopt(
            sys.argv[1:], 'h',
            ['version', 'help', 'json-module=', 'debug', 'log-file=']
        )
        message = None
        for option, value in option_list:
            # BUG FIX: the original tested e.g. ``option in ('--version')``.
            # ``('--version')`` is a plain string, not a tuple, so ``in``
            # performed *substring* matching rather than membership.  It
            # happened to work only because getopt returns exact option
            # strings; equality / real tuples are used instead.
            if option == '--version':
                message = _VERSION % dict(name=os.path.basename(sys.argv[0]),
                                          version=VERSION)
            elif option in ('-h', '--help'):
                message = _HELP % dict(name=os.path.basename(sys.argv[0]))
            elif option == '--json-module':
                json.use(module=value)
            elif option == '--debug':
                log.setLevel(logging.DEBUG)
            elif option == '--log-file':
                if value == '-':
                    # Log to stderr with a compact format.
                    handler = logging.StreamHandler(sys.stderr)
                    handler.setFormatter(logging.Formatter(
                        ' -> [%(levelname)s] %(message)s'
                    ))
                else:
                    # Log to the named file with timestamps.
                    handler = logging.FileHandler(value)
                    handler.setFormatter(logging.Formatter(
                        '[%(asctime)s] [%(levelname)s] %(message)s'
                    ))
                log.addHandler(handler)
        if message:
            sys.stdout.write(message)
            sys.stdout.flush()
            sys.exit(0)
    except getopt.GetoptError as error:
        message = '%s\n\nTry `%s --help` for more information.\n' % (
            str(error), os.path.basename(sys.argv[0])
        )
        sys.stderr.write(message)
        sys.stderr.flush()
        sys.exit(1)
    # Run the view server protocol loop on stdin/stdout.
    sys.exit(run())
| bsd-3-clause | f16b4a58407af5c1dfeee51ae48ccf28 | 30.157447 | 78 | 0.521989 | 4.186392 | false | false | false | false |
turbulenz/gyp | pylib/gyp/input_test.py | 10 | 3195 | #!/usr/bin/env python
# Copyright 2013 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Unit tests for the input.py file."""
import gyp.input
import unittest
import sys
class TestFindCycles(unittest.TestCase):
  """Unit tests for gyp.input.DependencyGraphNode.FindCycles."""

  def setUp(self):
    # Five disconnected nodes; each test wires up its own edges.
    self.nodes = {}
    for x in ('a', 'b', 'c', 'd', 'e'):
      self.nodes[x] = gyp.input.DependencyGraphNode(x)

  def _create_dependency(self, dependent, dependency):
    # Link both directions, mirroring how gyp.input builds the graph.
    dependent.dependencies.append(dependency)
    dependency.dependents.append(dependent)

  # NOTE: assertEquals is a deprecated alias (removed in Python 3.12);
  # assertEqual is used throughout instead.

  def test_no_cycle_empty_graph(self):
    for node in self.nodes.values():
      self.assertEqual([], node.FindCycles())

  def test_no_cycle_line(self):
    self._create_dependency(self.nodes['a'], self.nodes['b'])
    self._create_dependency(self.nodes['b'], self.nodes['c'])
    self._create_dependency(self.nodes['c'], self.nodes['d'])
    for node in self.nodes.values():
      self.assertEqual([], node.FindCycles())

  def test_no_cycle_dag(self):
    self._create_dependency(self.nodes['a'], self.nodes['b'])
    self._create_dependency(self.nodes['a'], self.nodes['c'])
    self._create_dependency(self.nodes['b'], self.nodes['c'])
    for node in self.nodes.values():
      self.assertEqual([], node.FindCycles())

  def test_cycle_self_reference(self):
    self._create_dependency(self.nodes['a'], self.nodes['a'])
    self.assertEqual([[self.nodes['a'], self.nodes['a']]],
                     self.nodes['a'].FindCycles())

  def test_cycle_two_nodes(self):
    self._create_dependency(self.nodes['a'], self.nodes['b'])
    self._create_dependency(self.nodes['b'], self.nodes['a'])
    self.assertEqual([[self.nodes['a'], self.nodes['b'], self.nodes['a']]],
                     self.nodes['a'].FindCycles())
    self.assertEqual([[self.nodes['b'], self.nodes['a'], self.nodes['b']]],
                     self.nodes['b'].FindCycles())

  def test_two_cycles(self):
    self._create_dependency(self.nodes['a'], self.nodes['b'])
    self._create_dependency(self.nodes['b'], self.nodes['a'])
    self._create_dependency(self.nodes['b'], self.nodes['c'])
    self._create_dependency(self.nodes['c'], self.nodes['b'])
    cycles = self.nodes['a'].FindCycles()
    self.assertTrue(
        [self.nodes['a'], self.nodes['b'], self.nodes['a']] in cycles)
    self.assertTrue(
        [self.nodes['b'], self.nodes['c'], self.nodes['b']] in cycles)
    self.assertEqual(2, len(cycles))

  def test_big_cycle(self):
    self._create_dependency(self.nodes['a'], self.nodes['b'])
    self._create_dependency(self.nodes['b'], self.nodes['c'])
    self._create_dependency(self.nodes['c'], self.nodes['d'])
    self._create_dependency(self.nodes['d'], self.nodes['e'])
    self._create_dependency(self.nodes['e'], self.nodes['a'])
    self.assertEqual([[self.nodes['a'],
                       self.nodes['b'],
                       self.nodes['c'],
                       self.nodes['d'],
                       self.nodes['e'],
                       self.nodes['a']]],
                     self.nodes['a'].FindCycles())

if __name__ == '__main__':
  unittest.main()
| bsd-3-clause | 5401839cb575aca0d3ee313ef0290d75 | 34.5 | 76 | 0.604695 | 3.573826 | false | true | false | false |
jjhelmus/nmrglue | nmrglue/analysis/peakpick.py | 2 | 19775 | """
Peak picking routines, lineshape parameter guessing, and related functions.
"""
import numpy as np
import scipy.ndimage as ndimage
from .analysisbase import ndwindow_index, valid_pt
from .lineshapes1d import gauss, ls_str2class
from .segmentation import find_all_downward, find_all_upward
from .segmentation import find_all_connected, find_all_nconnected
from ..fileio import table
def pick(data, pthres, nthres=None, msep=None, algorithm='connected',
         est_params=True, lineshapes=None, edge=None, diag=False, c_struc=None,
         c_ndil=0, cluster=True, table=True, axis_names=['A', 'Z', 'Y', 'X']):
    """
    Pick (find) peaks in a region of a NMR spectrum.

    Parameters
    ----------
    data : ndarray
        Region of NMR spectrum to pick peaks from.
    pthres : float
        Minimum peak height for positive peaks. None to not detect positive
        peaks.
    nthres : float
        Minimum peak height for negative peaks (typically a negative value).
        None to not detect negative peaks.
    msep : tuple of ints, optional
        N-tuple of minimum peak separations along each axis. Must be provided
        if algorithm is 'thres' or 'thres-fast'.
    algorithm : {'thres', 'thres-fast', 'downward', 'connected'}, optional
        Peak picking algorithm to use. Default is 'connected'.
    est_params : bool, optional
        True to perform an estimate of linewidths and amplitude for all peaks
        picked. False, the default, will return only the peak locations.
    lineshapes : list, optional
        A list of lineshape classes or string shortcuts for each dimension.
        If not specified Gaussian type lineshapes with a FWHM linewidth
        parameter is assumed in each dimension. This parameter if only used
        if est_params is True.
    edge : tuple of ints, optional
        Tuple to add to peak locations representing the edge of the region.
        None, the default, skips this addition.
    diag : bool, optional
        True to consider diagonal points to be touching in peak finding
        algorithm and clustering.
    c_struc : ndarray, optional
        Structure element to use when applying dilation on segments before
        applying clustering algorithm. None will apply a default square
        structure with connectivity one will be applied.
    c_ndil : int, optional
        Number of dilations to perform on segments before applying clustering
        algorithm.
    cluster : bool, optional
        True to cluster touching peaks. False does not apply clustering.
    table : bool, optional
        True to return a table. False to return lists.
    axis_names : list. optional
        List of axis names, the last n will be used for column name prefixes
        in table where n is the number of dimensions.

    Returns
    -------
    locations : list, returned when table is False
        Peak locations.
    cluster_ids : list, returned when table is False and cluster is True
        Cluster numbers for peaks.
    scales : list, returned when table is False and est_params is True
        Estimated peak scales (linewidths).
    amps : list, returned when table is False and est_params is True
        Estimated peak amplitudes.
    table : recarray, returned when table is True
        Table of request peak parameters.

    """
    ####################
    # Check parameters #
    ####################
    ndim = len(data.shape)
    # check msep
    if isinstance(msep, int):
        msep = (msep, )
    if algorithm in ['thres', 'thres-fast'] and len(msep) != ndim:
        raise ValueError("msep has incorrect length")
    # check algorithm
    if algorithm not in ['thres', 'thres-fast', 'downward', 'connected']:
        raise ValueError('Invalid algorithm %s' % (algorithm))
    # check lineshapes
    if est_params:
        # expand None to the default Gaussian lineshape in every dimension
        if lineshapes is None:
            lineshapes = [gauss() for i in range(ndim)]
        ls_classes = []
        # replace string shortcuts with lineshape class instances
        for l in lineshapes:
            if isinstance(l, str):
                ls_classes.append(ls_str2class(l))
            else:
                ls_classes.append(l)
        # check that all classes have 2 parameters (center and linewidth)
        for i, ls in enumerate(ls_classes):
            if ls.nparam(10) != 2:
                s = "Lineshape class %i does not have two parameters"
                raise ValueError(s % (i))
        if len(ls_classes) != ndim:
            raise ValueError("Incorrect number of lineshapes")
    if edge is not None and len(edge) != ndim:
        raise ValueError("edge has incorrect length")
    #######################
    # find positive peaks #
    #######################
    if pthres is None:  # no locations
        ploc = []
        pseg = []
    elif est_params is True:    # find locations and segments
        if algorithm == 'thres':
            # BUG FIX: 'thres' previously dispatched to find_all_thres_fast;
            # the memory-light find_all_thres is the intended implementation
            # (compare the negative-peak branches below).
            ploc, pseg = find_all_thres(data, pthres, msep, True)
        elif algorithm == 'thres-fast':
            ploc, pseg = find_all_thres_fast(data, pthres, msep, True)
        elif algorithm == 'downward':
            ploc, pseg = find_all_downward(data, pthres, True, diag)
        elif algorithm == 'connected':
            ploc, pseg = find_all_connected(data, pthres, True, diag)
        else:
            raise ValueError('Invalid algorithm %s' % (algorithm))
    else:   # find only locations
        if algorithm == 'thres':
            # BUG FIX: see note above -- use find_all_thres, not the _fast
            # variant, for the 'thres' algorithm.
            ploc = find_all_thres(data, pthres, msep, False)
        elif algorithm == 'thres-fast':
            ploc = find_all_thres_fast(data, pthres, msep, False)
        elif algorithm == 'downward':
            ploc = find_all_downward(data, pthres, False, diag)
        elif algorithm == 'connected':
            ploc = find_all_connected(data, pthres, False, diag)
        else:
            raise ValueError('Invalid algorithm %s' % (algorithm))
    #######################
    # find negative peaks #
    #######################
    if nthres is None:  # no locations
        nloc = []
        nseg = []
    elif est_params is True:    # find locations and segments
        if algorithm == 'thres':
            nloc, nseg = find_all_nthres(data, nthres, msep, True)
        elif algorithm == 'thres-fast':
            nloc, nseg = find_all_nthres_fast(data, nthres, msep, True)
        elif algorithm == 'downward':
            nloc, nseg = find_all_upward(data, nthres, True, diag)
        elif algorithm == 'connected':
            nloc, nseg = find_all_nconnected(data, nthres, True, diag)
        else:
            raise ValueError('Invalid algorithm %s' % (algorithm))
    else:   # find only locations
        if algorithm == 'thres':
            nloc = find_all_nthres(data, nthres, msep, False)
        elif algorithm == 'thres-fast':
            nloc = find_all_nthres_fast(data, nthres, msep, False)
        elif algorithm == 'downward':
            nloc = find_all_upward(data, nthres, False, diag)
        elif algorithm == 'connected':
            nloc = find_all_nconnected(data, nthres, False, diag)
        else:
            raise ValueError('Invalid algorithm %s' % (algorithm))
    # combine the positive and negative peaks
    locations = ploc + nloc
    #########################################################
    # return locations if no parameter estimation requested #
    #########################################################
    if est_params is False:
        if cluster:     # find clusters
            cluster_ids = clusters(data, locations, pthres, nthres, c_struc,
                                   None, c_ndil)
            locations = add_edge(locations, edge)
            if table:
                return pack_table(locations, cluster_ids,
                                  axis_names=axis_names)
            else:
                return locations, cluster_ids
        else:   # Do not determine clusters
            locations = add_edge(locations, edge)
            if table:
                return pack_table(locations, axis_names=axis_names)
            else:
                return locations
    ##################################
    # estimate scales and amplitudes #
    ##################################
    seg_slices = pseg + nseg
    scales = [[]] * len(locations)
    amps = [[]] * len(locations)
    for i, (l, seg_slice) in enumerate(zip(locations, seg_slices)):
        # the refined peak center returned by guess_params_slice is
        # intentionally discarded; only scale and amplitude are kept.
        _, scales[i], amps[i] = guess_params_slice(data, l, seg_slice,
                                                   ls_classes)
    ########################################################
    # return locations, scales and amplitudes as requested #
    ########################################################
    if cluster:
        cluster_ids = clusters(data, locations, pthres, nthres, c_struc, None,
                               c_ndil)
        locations = add_edge(locations, edge)
        if table:
            return pack_table(locations, cluster_ids, scales, amps, axis_names)
        else:
            return locations, cluster_ids, scales, amps
    else:
        locations = add_edge(locations, edge)
        if table:
            return pack_table(locations, scales=scales, amps=amps,
                              axis_names=axis_names)
        else:
            return locations, scales, amps
def add_edge(locations, edge):
    """
    Offset each peak location by ``edge``.

    Returns the input list unchanged when ``edge`` is None, otherwise a new
    list of tuples with ``edge`` added element-wise to every location.
    """
    if edge is None:
        return locations
    shifted = []
    for loc in locations:
        shifted.append(tuple(e + c for e, c in zip(edge, loc)))
    return shifted
def clusters(data, locations, pthres, nthres, d_struc=None, l_struc=None,
             ndil=0):
    """
    Perform cluster analysis of peak locations.

    Parameters
    ----------
    data : ndarray
        Array of data which has been peak picked.
    locations : list
        List of peak locations.
    pthres : float
        Positive peak threshold. None for no positive peaks.
    nthres : float
        Negative peak threshold. None for no negative peaks.
    d_struc : ndarray, optional
        Structure of binary dilation to apply on segments before clustering.
        None uses a square structure with connectivity of one.
    l_struc : ndarray, optional
        Structure to use for determining segment connectivity in clustering.
        None uses square structure with connectivity of one.
    ndil : int, optional
        Number of dilations to apply on segments before determining clusters.

    Returns
    -------
    cluster_ids : list
        List of cluster number corresponding to peak locations.

    """
    # boolean mask of points beyond the active threshold(s)
    if pthres is None:          # negative peaks only
        mask = data < nthres
    elif nthres is None:        # positive peaks only
        mask = data > pthres
    else:                       # both positive and negative peaks
        mask = np.bitwise_or(data < nthres, data > pthres)
    # optionally grow the segments so nearby ones merge into one cluster
    if ndil != 0:
        mask = ndimage.binary_dilation(mask, d_struc, iterations=ndil)
    # label the connected segments; each label is one cluster
    labeled, _ = ndimage.label(mask, l_struc)
    return [labeled[loc] for loc in locations]
def pack_table(locations, cluster_ids=None, scales=None, amps=None,
               axis_names=["A", "Z", "Y", "X"]):
    """
    Create a table from peak information.

    Parameters
    ----------
    locations : list
        List of peak locations.
    cluster_ids : list, optional
        List of cluster numbers. None will not include cluster number in the
        table.
    scales : list, optional
        List of peak scales (linewidths). None will not include peak scales in
        the table.
    amps : list, optional
        List of peak amplitudes. None will not include peak amplitudes in the
        table.
    axis_names : list, optional
        List of axis names, the last n will be used for column name prefixes
        where n is the number of dimensions.

    Returns
    -------
    table : recarray
        nmrglue table with column representing peak parameters. Peak locations
        are given column names like 'X_AXIS', 'Y_AXIS', etc. Cluster_ids are
        given a column name of 'cID'. Peak scales (linewidths) are given
        column names like 'X_LW','Y_LW'. Peak amplitudes are given a column
        name of 'VOL'.

    """
    ndim = len(locations[0])
    anames = axis_names[-ndim:]
    # BUG FIX: np.float was a deprecated alias for the builtin float and was
    # removed in NumPy 1.24; use the builtin directly.
    dt = [(a + "_AXIS", float) for a in anames]
    rec = np.rec.array(locations, dtype=dt)
    if cluster_ids is not None:
        rec = table.append_column(rec, cluster_ids, 'cID', 'int')
    if scales is not None:
        names = [a + "_LW" for a in anames]
        for n, c in zip(names, np.array(scales).T):
            rec = table.append_column(rec, c, n, 'float')
    if amps is not None:
        rec = table.append_column(rec, amps, 'VOL', 'float')
    return rec
def guess_params_slice(data, location, seg_slice, ls_classes):
    """
    Guess the parameters of a peak in a segment.

    Parameters
    ----------
    data : ndarray
        NMR data.
    location : tuple
        Peak location.
    seg_slice : list of slices
        List of slices which slice data to give the desired segment.
    ls_classes : list
        List of lineshape classes (one per dimension).

    Returns
    -------
    location : tuple
        Refined peak location.
    scale : tuple
        Peak scales (linewidths).
    amp : float
        Peak amplitude.

    """
    # find the rectangular region around the segment.
    # BUG FIX: index with a *tuple* of slices; indexing an ndarray with a
    # list of slices was deprecated in NumPy 1.15 and later removed
    # (extract_1d below already uses tuple indexing).
    region = data[tuple(seg_slice)]
    edge = [s.start for s in seg_slice]
    rlocation = [l - s.start for l, s in zip(location, seg_slice)]
    # amplitude is estimated by the sum of all points in the region
    amp = np.sum(region)
    scale = []      # list of linewidths
    nlocation = []  # list of peak centers
    # loop over the axes
    for axis, ls in enumerate(ls_classes):
        # extract the 1D trace through the peak along this axis
        r = extract_1d(region, rlocation, axis)
        # estimate the lineshape center and linewidth from the trace
        loc, sc = ls.guessp(r)
        scale.append(float(sc))
        nlocation.append(float(loc))
    # shift the refined centers back to absolute coordinates
    return tuple([l + e for l, e in zip(nlocation, edge)]), tuple(scale), amp
def extract_1d(data, location, axis):
    """
    Return the 1D trace of ``data`` running along ``axis`` through
    ``location``.
    """
    # select the single point `location` in every dimension, then open the
    # requested axis up to a full slice
    index = [slice(p, p + 1) for p in location]
    index[axis] = slice(None)
    trace = data[tuple(index)]
    return np.atleast_1d(np.squeeze(trace))
# algorithm specific peak picking routines
def find_all_thres(data, thres, msep, find_segs=False):
    """
    Peak pick a spectrum using a threshhold-minimum distance algorithm.

    Find peaks (local maxima) in a arbitrary dimensional NMR spectra above a
    set threshold with a minimal distance between peaks.  When the spectrum is
    small and multiple copies can fit into RAM use the _fast version of this
    function. Segments are found by finding the first point in each direction
    along each dimension which is below the threshold.

    Parameters
    ----------
    data : ndarray
        NMR data.
    thres : float
        Threshold value for minimum peak height
    msep : tuple
        Tuple of minimum peak separations along each axis.
    find_segs : bool, optional
        True to find segments and return a list of slices which select that
        segment. False performs no segmentation discovery.

    Returns
    -------
    locations : list
        List of peak locations
    seg_slices : list, optional
        List of slices which extract a region around each peak. Only returned
        when find_segs is True.

    """
    locations = []  # create an empty list of peak locations
    wsize = tuple([2 * i + 1 for i in msep])    # window size is 2*separation+1
    # loop over the windows
    for idx, s in ndwindow_index(data.shape, wsize):
        # wmax renamed from `max` so the builtin is not shadowed
        wmax = data[s].max()
        if wmax == data[idx] and wmax > thres:
            locations.append(idx)
    if find_segs:
        # BUG FIX: find_pseg_slice expects a *single* peak location; the
        # original passed the whole locations list at once.  Build one slice
        # list per peak, matching find_all_thres_fast.
        seg_slices = [find_pseg_slice(data, l, thres) for l in locations]
        return locations, seg_slices
    else:
        return locations
def find_all_nthres(data, thres, msep, find_segs=False):
    """
    Peak pick a spectrum using a threshhold-minimum distance algorithm.

    Identical to find_all_thres except local minima are found below the
    given threshold. See :py:func:`find_all_thres` for a description of the
    algorithm and documentation.

    """
    locations = []  # create an empty list of peak locations
    wsize = tuple([2 * i + 1 for i in msep])    # window size is 2*separation+1
    # loop over the windows
    for idx, s in ndwindow_index(data.shape, wsize):
        # wmin renamed from `min` so the builtin is not shadowed
        wmin = data[s].min()
        if wmin == data[idx] and wmin < thres:
            locations.append(idx)
    if find_segs:
        # BUG FIX: (1) find_?seg_slice expects a single location, not the
        # whole list; (2) negative-peak segments lie *below* the threshold,
        # so find_nseg_slice must be used (the original called
        # find_pseg_slice, which walks points above the threshold).
        seg_slices = [find_nseg_slice(data, l, thres) for l in locations]
        return locations, seg_slices
    else:
        return locations
def find_all_thres_fast(data, thres, msep, find_segs=False):
    """
    Fast version of find_all_thres. See :py:func:`find_all_thres`.
    """
    # window of 2 * separation + 1 points centered on each candidate
    window = tuple(2 * m + 1 for m in msep)
    # mask of points that are the maximum within their own window
    is_max = ndimage.maximum_filter(data, size=window, mode='constant') == data
    # mask of points above the threshold
    above = np.ma.greater(data, thres)
    # peaks are points satisfying both masks
    peaks = np.bitwise_and(above, is_max)
    locations = [tuple(idx) for idx in np.transpose(np.nonzero(peaks))]
    if not find_segs:
        return locations
    seg_slices = [find_pseg_slice(data, loc, thres) for loc in locations]
    return locations, seg_slices
def find_all_nthres_fast(data, thres, msep, find_segs=False):
    """
    Fast version of find_all_nthres. See :py:func:`find_all_thres`.
    """
    wsize = tuple([2 * i + 1 for i in msep])    # window size is 2*separation+1
    # find local minima mask
    mn = ndimage.minimum_filter(data, size=wsize, mode='constant') == data
    # find negative threshold mask
    nthres = np.ma.less(data, thres)
    # peaks are bitwise and of minimum mask and threshold mask
    locations = np.transpose(np.nonzero(np.bitwise_and(nthres, mn)))
    locations = [tuple(i) for i in locations]
    if find_segs:
        # BUG FIX: negative-peak segments lie *below* the threshold, so they
        # must be traced with find_nseg_slice (the original called
        # find_pseg_slice, which walks points above the threshold).
        seg_slices = [find_nseg_slice(data, l, thres) for l in locations]
        return locations, seg_slices
    else:
        return locations
def find_pseg_slice(data, location, thres):
    """
    Find slices which define a segment in data above thres.

    Starting from ``location`` (a point assumed to be above ``thres``),
    walk outward in both directions along every dimension until a point at
    or below ``thres`` — or an invalid point per ``valid_pt`` (presumably an
    in-bounds check; defined in analysisbase — TODO confirm) — is reached.
    Returns a list with one ``slice`` per dimension bounding the contiguous
    above-threshold segment.
    """
    shape = data.shape
    seg_slice = []
    for dim, v in enumerate(location):
        # find start value
        al = list(location)
        start = v
        while(valid_pt(al, shape) and data[tuple(al)] > thres):
            start = start - 1
            al[dim] = start
        # find stop value
        al = list(location)
        stop = v
        while(valid_pt(al, shape) and data[tuple(al)] > thres):
            stop = stop + 1
            al[dim] = stop
        # the downward walk overshoots by one point, hence start + 1
        seg_slice.append(slice(start + 1, stop))
    return seg_slice
def find_nseg_slice(data, location, thres):
    """
    Find slices which define a segment in data below thres.

    Mirror of :py:func:`find_pseg_slice` for negative peaks: starting from
    ``location`` (a point assumed to be below ``thres``), walk outward along
    every dimension until a point at or above ``thres`` — or an invalid
    point per ``valid_pt`` — is reached.  Returns a list with one ``slice``
    per dimension bounding the contiguous below-threshold segment.
    """
    shape = data.shape
    seg_slice = []
    for dim, v in enumerate(location):
        # find start value
        al = list(location)
        start = v
        while(valid_pt(al, shape) and data[tuple(al)] < thres):
            start = start - 1
            al[dim] = start
        # find stop value
        al = list(location)
        stop = v
        while(valid_pt(al, shape) and data[tuple(al)] < thres):
            stop = stop + 1
            al[dim] = stop
        # the downward walk overshoots by one point, hence start + 1
        seg_slice.append(slice(start + 1, stop))
    return seg_slice
| bsd-3-clause | 99d6728fcc99524ea011a3e02614ca23 | 34.249554 | 79 | 0.600708 | 4.054747 | false | false | false | false |
jjhelmus/nmrglue | tests/pipe_proc_tests/ext.py | 4 | 1927 | #! /usr/bin/env python
""" Create files for ext unit test """
from subprocess import check_call
import nmrglue.fileio.pipe as pipe
import nmrglue.process.pipe_proc as p
d, a = pipe.read("time_complex.fid")
d, a = p.ext(d, a, left=True, sw=True)
pipe.write("ext1.glue", d, a, overwrite=True)
d, a = pipe.read("time_complex.fid")
d, a = p.ext(d, a, right=True)
pipe.write("ext2.glue", d, a, overwrite=True)
d, a = pipe.read("time_complex.fid")
d, a = p.ext(d, a, mid=True)
pipe.write("ext3.glue", d, a, overwrite=True)
d, a = pipe.read("time_complex.fid")
d, a = p.ext(d, a, x1=1, xn=100)
pipe.write("ext4.glue", d, a, overwrite=True)
d, a = pipe.read("time_complex.fid")
d, a = p.ext(d, a, xn=200, y1=50, yn=75)
pipe.write("ext5.glue", d, a, overwrite=True)
d, a = pipe.read("time_complex.fid")
d, a = p.ext(d, a, x1=5, xn=200, pow2=True)
pipe.write("ext6.glue", d, a, overwrite=True)
d, a = pipe.read("time_complex.fid")
d, a = p.ext(d, a, x1=5, xn=200, round=10)
pipe.write("ext7.glue", d, a, overwrite=True)
pipe_command = """\
/bin/csh -c 'nmrPipe -in ./time_complex.fid \
| nmrPipe -fn FT -auto \
-ov -out time-freq.c.ft1'"""
check_call(pipe_command, shell=True)
d, a = pipe.read("time-freq.c.ft1")
d, a = p.ext(d, a, left=True, sw=True)
pipe.write("ext8.glue", d, a, overwrite=True)
d, a = pipe.read("time-freq.c.ft1")
d, a = p.ext(d, a, right=True, sw=True)
pipe.write("ext9.glue", d, a, overwrite=True)
os.remove("time-freq.c.ft1")
pipe_command = """\
/bin/csh -c 'nmrPipe -in ./time_complex.fid \
| nmrPipe -fn FT -auto -di \
-ov -out time-freq.r.ft1'"""
check_call(pipe_command, shell=True)
d, a = pipe.read("time-freq.r.ft1")
d, a = p.ext(d, a, left=True, sw=True)
pipe.write("ext10.glue", d, a, overwrite=True)
d, a = pipe.read("time-freq.r.ft1")
d, a = p.ext(d, a, right=True, sw=True)
pipe.write("ext11.glue", d, a, overwrite=True)
os.remove("time-freq.r.ft1")
| bsd-3-clause | 332dc1c06911dc9afa8cd87ee80c7288 | 26.927536 | 60 | 0.629476 | 2.225173 | false | false | false | false |
jobovy/galpy | galpy/potential/PerfectEllipsoidPotential.py | 1 | 3638 | ###############################################################################
# PerfectEllipsoidPotential.py: Potential of the perfect ellipsoid
# (de Zeeuw 1985):
#
# \rho(x,y,z) ~ 1/(1+m^2)^2
#
# with m^2 = x^2+y^2/b^2+z^2/c^2
#
###############################################################################
import numpy
from ..util import conversion
from .EllipsoidalPotential import EllipsoidalPotential
class PerfectEllipsoidPotential(EllipsoidalPotential):
    """Potential of the perfect ellipsoid (de Zeeuw 1985):

    .. math::

        \\rho(x,y,z) = \\frac{\\mathrm{amp\\,a}}{\\pi^2\\,bc}\\,\\frac{1}{(m^2+a^2)^2}

    where :math:`\\mathrm{amp} = GM` is the total mass and :math:`m^2 = x^2+y^2/b^2+z^2/c^2`.
    """
    def __init__(self,amp=1.,a=5.,b=1.,c=1.,
                 zvec=None,pa=None,glorder=50,
                 normalize=False,ro=None,vo=None):
        """
        NAME:

           __init__

        PURPOSE:

           initialize a perfect ellipsoid potential

        INPUT:

           amp - amplitude to be applied to the potential (default: 1); can be a Quantity with units of mass or G x mass

           a - scale radius (can be Quantity)

           b - y-to-x axis ratio of the density

           c - z-to-x axis ratio of the density

           zvec= (None) If set, a unit vector that corresponds to the z axis

           pa= (None) If set, the position angle of the x axis (rad or Quantity)

           glorder= (50) if set, compute the relevant force and potential integrals with Gaussian quadrature of this order

           ro=, vo= distance and velocity scales for translation into internal units (default from configuration file)

        OUTPUT:

           (none)

        HISTORY:

           2018-08-06 - Started - Bovy (UofT)

        """
        EllipsoidalPotential.__init__(self,amp=amp,b=b,c=c,
                                      zvec=zvec,pa=pa,glorder=glorder,
                                      ro=ro,vo=vo,amp_units='mass')
        a= conversion.parse_length(a,ro=self._ro)
        self.a= a
        self.a2= self.a**2
        self._scale= self.a
        # Adjust amp: convert the total-mass amplitude into the density
        # normalization of the profile above
        self._amp*= self.a/(numpy.pi**2*self._b*self._c)
        if normalize or \
                (isinstance(normalize,(int,float)) \
                     and not isinstance(normalize,bool)): #pragma: no cover
            self.normalize(normalize)
        self.hasC= self._glorder is not None
        self.hasC_dxdv= False
        self.hasC_dens= self.hasC # works if mdens is defined, necessary for hasC
        return None

    def _psi(self,m):
        r"""\psi(m) = -\int_m^\infty d m^2 \rho(m^2)"""
        return -1./(self.a2+m**2)

    def _mdens(self,m):
        """Density as a function of m"""
        return (self.a2+m**2)**-2

    def _mdens_deriv(self,m):
        """Derivative of the density as a function of m"""
        return -4.*m*(self.a2+m**2)**-3

    def _mass(self,R,z=None,t=0.):
        """
        NAME:

           _mass

        PURPOSE:

           evaluate the mass within R (and z) for this potential; if z=None, integrate to ellipsoidal boundary

        INPUT:

           R - Galactocentric cylindrical radius

           z - vertical height

           t - time

        OUTPUT:

           the mass enclosed

        HISTORY:

           2021-03-08 - Written - Bovy (UofT)

        """
        if z is not None: raise AttributeError # Hack to fall back to general
        # M(<m=R) = 4 pi b c int_0^R dm m^2 (a^2+m^2)^-2
        #         = 2 pi b c / a [arctan(R/a) - a R/(a^2+R^2)]
        # (previous code had 1.+R**2. in the denominator, which is only
        #  correct for a=1)
        return 2.*numpy.pi*self._b*self._c/self.a\
            *(numpy.arctan(R/self.a)-R*self.a/(self.a2+R**2.))
| bsd-3-clause | 784a91f31987311202d5723d8c902d88 | 31.774775 | 122 | 0.512644 | 3.60198 | false | false | false | false |
jobovy/galpy | doc/source/examples/dierickx-edist.py | 1 | 2765 | import csv
import os
import os.path
import cPickle as pickle
import numpy as nu
from galpy.orbit import Orbit
from galpy.potential import (HernquistPotential, LogarithmicHaloPotential,
MiyamotoNagaiPotential, NFWPotential)
from galpy.util import plot
_degtorad= nu.pi/180.
def calc_es():
    """Compute orbital eccentricities for the Dierickx et al. sample and
    compare them with the published values.

    Each star's orbit is integrated in a logarithmic halo potential; the
    resulting eccentricities are cached in 'myes.sav' so repeated runs skip
    the (slow) integrations.  Three comparison plots are written:
    myee.png, ehist.png, and myehist.png.
    """
    savefilename= 'myes.sav'
    if os.path.exists(savefilename):
        # Use eccentricities cached by an earlier run
        with open(savefilename,'rb') as savefile:
            mye= pickle.load(savefile)
            e= pickle.load(savefile)
    else:
        #Read data: space-delimited table with runs of whitespace
        # (pass skipinitialspace directly instead of mutating the
        #  csv.excel dialect class, which would affect all csv users)
        vxvs= []
        es= []
        with open('../data/Dierickx-etal-tab2.txt') as datafile:
            reader= csv.reader(datafile,delimiter=' ',skipinitialspace=True)
            for row in reader:
                thisra= float(row[3])
                thisdec= float(row[4])
                thisd= float(row[17])/1000.   # distance in kpc
                thispmra= float(row[13])
                thispmdec= float(row[15])
                thisvlos= float(row[11])
                thise= float(row[26])          # published eccentricity
                vxvs.append([thisra,thisdec,thisd,thispmra,thispmdec,
                             thisvlos])
                es.append(thise)
        vxvv= nu.array(vxvs)
        e= nu.array(es)
        #Define potential; only the logarithmic halo is used in the orbit
        #integrations below
        lp= LogarithmicHaloPotential(normalize=1.)
        ts= nu.linspace(0.,20.,10000)
        mye= nu.zeros(len(e))
        for ii in range(len(e)):
            #Integrate the orbit and record its eccentricity
            o= Orbit(vxvv[ii,:],radec=True,vo=220.,ro=8.)
            o.integrate(ts,lp)
            mye[ii]= o.e()
        #Save the computed and published eccentricities for later runs
        with open(savefilename,'wb') as savefile:
            pickle.dump(mye,savefile)
            pickle.dump(e,savefile)
    #plot computed vs. published eccentricities and their histograms
    plot.print()
    plot.plot(nu.array([0.,1.]),nu.array([0.,1.]),'k-',
              xlabel=r'$\mathrm{Dierickx\ et\ al.}\ e$',
              ylabel=r'$\mathrm{galpy}\ e$')
    plot.plot(e,mye,'k,',overplot=True)
    plot.end_print('myee.png')
    plot.print()
    plot.hist(e,bins=30,xlabel=r'$\mathrm{Dierickx\ et\ al.}\ e$')
    plot.end_print('ehist.png')
    plot.print()
    plot.hist(mye,bins=30,xlabel=r'$\mathrm{galpy}\ e$')
    plot.end_print('myehist.png')
# Script entry point: run the eccentricity comparison
if __name__ == '__main__':
    calc_es()
| bsd-3-clause | 6a73e997c6757a5c20ca926d8a3ed7f2 | 28.414894 | 96 | 0.552622 | 2.995666 | false | false | false | false |
jobovy/galpy | galpy/potential/AnySphericalPotential.py | 1 | 4735 | ###############################################################################
# AnySphericalPotential: Potential of an arbitrary spherical density
###############################################################################
import numpy
from scipy import integrate
from ..util import conversion
from ..util._optional_deps import _APY_LOADED
from .SphericalPotential import SphericalPotential
if _APY_LOADED:
from astropy import units
class AnySphericalPotential(SphericalPotential):
    """Class that implements the potential of an arbitrary spherical density distribution :math:`\\rho(r)`"""
    def __init__(self,dens=lambda r: 0.64/r/(1+r)**3,amp=1.,normalize=False,
                 ro=None,vo=None):
        """
        NAME:

           __init__

        PURPOSE:

           Initialize the potential of an arbitrary spherical density distribution

        INPUT:

           dens= (0.64/r/(1+r)**3) function of a single variable that gives the density as a function of radius (can return a Quantity)

           amp= (1.) amplitude to be applied to the potential

           normalize - if True, normalize such that vc(1.,0.)=1., or, if
                       given as a number, such that the force is this fraction
                       of the force necessary to make vc(1.,0.)=1.

           ro=, vo= distance and velocity scales for translation into internal units (default from configuration file)

        OUTPUT:

           (none)

        HISTORY:

           2021-01-05 - Written - Bovy (UofT)

        """
        SphericalPotential.__init__(self,amp=amp,ro=ro,vo=vo)
        # Parse density: does it have units? does it expect them?
        if _APY_LOADED:
            # Probe whether dens requires a Quantity input: calling it with a
            # plain number raises a units error if it does
            _dens_unit_input= False
            try:
                dens(1)
            except (units.UnitConversionError,units.UnitTypeError):
                _dens_unit_input= True
            # Probe whether dens returns a Quantity (density units)
            _dens_unit_output= False
            if _dens_unit_input:
                try:
                    dens(1.*units.kpc).to(units.Msun/units.pc**3)
                except (AttributeError,units.UnitConversionError): pass
                else: _dens_unit_output= True
            else:
                try:
                    dens(1.).to(units.Msun/units.pc**3)
                except (AttributeError,units.UnitConversionError): pass
                else: _dens_unit_output= True
            # Wrap dens so _rawdens always maps an internal-unit radius to an
            # internal-unit density, whatever combination dens supports
            if _dens_unit_input and _dens_unit_output:
                self._rawdens= lambda R: conversion.parse_dens(\
                    dens(R*self._ro*units.kpc),
                    ro=self._ro,vo=self._vo)
            elif _dens_unit_input:
                self._rawdens= lambda R: dens(R*self._ro*units.kpc)
            elif _dens_unit_output:
                self._rawdens= lambda R: conversion.parse_dens(dens(R),
                                                               ro=self._ro,
                                                               vo=self._vo)
        if not hasattr(self,'_rawdens'): # unitless
            self._rawdens= dens
        # Enclosed mass M(<r) = 4 pi int_0^r a^2 rho(a) da
        self._rawmass= lambda r: 4.*numpy.pi\
            *integrate.quad(lambda a: a**2*self._rawdens(a),0,r)[0]
        # The potential at zero, try to figure out whether it's finite
        # (quad reports divergence/non-convergence in its message string)
        _zero_msg= integrate.quad(lambda a: a*self._rawdens(a),
                                  0,numpy.inf,full_output=True)[-1]
        _infpotzero= 'divergent' in _zero_msg or 'maximum number' in _zero_msg
        self._pot_zero= -numpy.inf if _infpotzero \
            else -4.*numpy.pi\
            *integrate.quad(lambda a: a*self._rawdens(a),0,numpy.inf)[0]
        # The potential at infinity: zero for a finite total mass,
        # +inf otherwise
        _infmass= 'divergent' \
            in integrate.quad(lambda a: a**2.*self._rawdens(a),
                              0,numpy.inf,full_output=True)[-1]
        self._pot_inf= 0. if not _infmass else numpy.inf
        # Normalize?
        if normalize or \
                (isinstance(normalize,(int,float)) \
                     and not isinstance(normalize,bool)): #pragma: no cover
            self.normalize(normalize)
        return None

    def _revaluate(self,r,t=0.):
        """Potential as a function of r and time"""
        # Special-case the (possibly infinite) limits computed in __init__
        if r == 0:
            return self._pot_zero
        elif numpy.isinf(r):
            return self._pot_inf
        else:
            # Phi(r) = -M(<r)/r - 4 pi int_r^inf a rho(a) da
            return -self._rawmass(r)/r\
                -4.*numpy.pi*integrate.quad(lambda a: self._rawdens(a)*a,
                                            r,numpy.inf)[0]

    def _rforce(self,r,t=0.):
        # Spherical symmetry: F_r = -M(<r)/r^2
        return -self._rawmass(r)/r**2

    def _r2deriv(self,r,t=0.):
        return -2*self._rawmass(r)/r**3.+4.*numpy.pi*self._rawdens(r)

    def _rdens(self,r,t=0.):
        return self._rawdens(r)
| bsd-3-clause | d9b01e08cf2aca655c83de6626e7bca2 | 39.470085 | 135 | 0.51679 | 4.005922 | false | false | false | false |
jobovy/galpy | galpy/actionAngle/actionAngleTorus.py | 1 | 9521 | ###############################################################################
# class: actionAngleTorus
#
# Use McMillan, Binney, and Dehnen's Torus code to calculate (x,v)
# given actions and angles
#
#
###############################################################################
import warnings
import numpy
from ..potential import MWPotential, _isNonAxi
from ..potential.Potential import _check_c
from ..potential.Potential import flatten as flatten_potential
from ..util import galpyWarning
from . import actionAngleTorus_c
from .actionAngleTorus_c import _ext_loaded as ext_loaded
_autofit_errvals= {}
_autofit_errvals[-1]= 'something wrong with input, usually bad starting values for the parameters'
_autofit_errvals[-2]= 'Fit failed the goal by a factor <= 2'
_autofit_errvals[-3]= 'Fit failed the goal by more than 2'
_autofit_errvals[-4]= 'Fit aborted: serious problems occured'
class actionAngleTorus:
    """Action-angle formalism using the Torus machinery"""
    def __init__(self,*args,**kwargs):
        """
        NAME:

           __init__

        PURPOSE:

           initialize an actionAngleTorus object

        INPUT:

           pot= potential or list of potentials (3D)

           tol= default tolerance to use when fitting tori (|dJ|/J)

           dJ= default action difference when computing derivatives (Hessian or Jacobian)

        OUTPUT:

           instance

        HISTORY:

           2015-08-07 - Written - Bovy (UofT)

        """
        if 'pot' not in kwargs: #pragma: no cover
            raise OSError("Must specify pot= for actionAngleTorus")
        self._pot= flatten_potential(kwargs['pot'])
        if _isNonAxi(self._pot):
            raise RuntimeError("actionAngleTorus for non-axisymmetric potentials is not supported")
        if self._pot == MWPotential:
            warnings.warn("Use of MWPotential as a Milky-Way-like potential is deprecated; galpy.potential.MWPotential2014, a potential fit to a large variety of dynamical constraints (see Bovy 2015), is the preferred Milky-Way-like potential in galpy",
                          galpyWarning)
        # The Torus machinery is only available through the C extension
        if ext_loaded:
            self._c= _check_c(self._pot)
            if not self._c:
                raise RuntimeError('The given potential is not fully implemented in C; using the actionAngleTorus code is not supported in pure Python')
        else:# pragma: no cover
            raise RuntimeError('actionAngleTorus instances cannot be used, because the actionAngleTorus_c extension failed to load')
        self._tol= kwargs.get('tol',0.001)
        self._dJ= kwargs.get('dJ',0.001)
        return None

    def __call__(self,jr,jphi,jz,angler,anglephi,anglez,**kwargs):
        """
        NAME:

           __call__

        PURPOSE:

           evaluate the phase-space coordinates (x,v) for a number of angles on a single torus

        INPUT:

           jr - radial action (scalar)

           jphi - azimuthal action (scalar)

           jz - vertical action (scalar)

           angler - radial angle (array [N])

           anglephi - azimuthal angle (array [N])

           anglez - vertical angle (array [N])

           tol= (object-wide value) goal for |dJ|/|J| along the torus

        OUTPUT:

           [R,vR,vT,z,vz,phi]

        HISTORY:

           2015-08-07 - Written - Bovy (UofT)

        """
        out= actionAngleTorus_c.actionAngleTorus_xvFreqs_c(\
            self._pot,
            jr,jphi,jz,
            angler,anglephi,anglez,
            tol=kwargs.get('tol',self._tol))
        # out[9] is the AutoFit status flag; non-zero means the torus fit
        # did not reach its goal
        if out[9] != 0:
            warnings.warn("actionAngleTorus' AutoFit exited with non-zero return status %i: %s" % (out[9],_autofit_errvals[out[9]]),
                          galpyWarning)
        return numpy.array(out[:6]).T

    def xvFreqs(self,jr,jphi,jz,angler,anglephi,anglez,**kwargs):
        """
        NAME:

           xvFreqs

        PURPOSE:

           evaluate the phase-space coordinates (x,v) for a number of angles on a single torus as well as the frequencies

        INPUT:

           jr - radial action (scalar)

           jphi - azimuthal action (scalar)

           jz - vertical action (scalar)

           angler - radial angle (array [N])

           anglephi - azimuthal angle (array [N])

           anglez - vertical angle (array [N])

           tol= (object-wide value) goal for |dJ|/|J| along the torus

        OUTPUT:

           ([R,vR,vT,z,vz,phi],OmegaR,Omegaphi,Omegaz,AutoFit error message)

        HISTORY:

           2015-08-07 - Written - Bovy (UofT)

        """
        out= actionAngleTorus_c.actionAngleTorus_xvFreqs_c(\
            self._pot,
            jr,jphi,jz,
            angler,anglephi,anglez,
            tol=kwargs.get('tol',self._tol))
        if out[9] != 0:
            warnings.warn("actionAngleTorus' AutoFit exited with non-zero return status %i: %s" % (out[9],_autofit_errvals[out[9]]),
                          galpyWarning)
        return (numpy.array(out[:6]).T,out[6],out[7],out[8],out[9])

    def Freqs(self,jr,jphi,jz,**kwargs):
        """
        NAME:

           Freqs

        PURPOSE:

           return the frequencies corresponding to a torus

        INPUT:

           jr - radial action (scalar)

           jphi - azimuthal action (scalar)

           jz - vertical action (scalar)

           tol= (object-wide value) goal for |dJ|/|J| along the torus

        OUTPUT:

           (OmegaR,Omegaphi,Omegaz)

        HISTORY:

           2015-08-07 - Written - Bovy (UofT)

        """
        out= actionAngleTorus_c.actionAngleTorus_Freqs_c(\
            self._pot,
            jr,jphi,jz,
            tol=kwargs.get('tol',self._tol))
        if out[3] != 0:
            warnings.warn("actionAngleTorus' AutoFit exited with non-zero return status %i: %s" % (out[3],_autofit_errvals[out[3]]),
                          galpyWarning)
        return out

    def hessianFreqs(self,jr,jphi,jz,**kwargs):
        """
        NAME:

           hessianFreqs

        PURPOSE:

           return the Hessian d Omega / d J and frequencies Omega corresponding to a torus

        INPUT:

           jr - radial action (scalar)

           jphi - azimuthal action (scalar)

           jz - vertical action (scalar)

           tol= (object-wide value) goal for |dJ|/|J| along the torus

           dJ= (object-wide value) action difference when computing derivatives (Hessian or Jacobian)

           nosym= (False) if True, don't explicitly symmetrize the Hessian (good to check errors)

        OUTPUT:

           (dO/dJ,Omegar,Omegaphi,Omegaz,Autofit error message)

        HISTORY:

           2016-07-15 - Written - Bovy (UofT)

        """
        out= actionAngleTorus_c.actionAngleTorus_hessian_c(\
            self._pot,
            jr,jphi,jz,
            tol=kwargs.get('tol',self._tol),
            dJ=kwargs.get('dJ',self._dJ))
        if out[4] != 0:
            warnings.warn("actionAngleTorus' AutoFit exited with non-zero return status %i: %s" % (out[4],_autofit_errvals[out[4]]),
                          galpyWarning)
        # Re-arrange frequencies and actions to r,phi,z: the C code orders
        # them (r,z,phi), so swap the last two columns and rows
        out[0][:,:]= out[0][:,[0,2,1]]
        out[0][:,:]= out[0][[0,2,1]]
        if kwargs.get('nosym',False):
            return out
        else: # explicitly symmetrize
            return (0.5*(out[0]+out[0].T),out[1],out[2],out[3],out[4])

    def xvJacobianFreqs(self,jr,jphi,jz,angler,anglephi,anglez,**kwargs):
        """
        NAME:

           xvJacobianFreqs

        PURPOSE:

           return [R,vR,vT,z,vz,phi], the Jacobian d [R,vR,vT,z,vz,phi] / d (J,angle), the Hessian dO/dJ, and frequencies Omega corresponding to a torus at multiple sets of angles

        INPUT:

           jr - radial action (scalar)

           jphi - azimuthal action (scalar)

           jz - vertical action (scalar)

           angler - radial angle (array [N])

           anglephi - azimuthal angle (array [N])

           anglez - vertical angle (array [N])

           tol= (object-wide value) goal for |dJ|/|J| along the torus

           dJ= (object-wide value) action difference when computing derivatives (Hessian or Jacobian)

           nosym= (False) if True, don't explicitly symmetrize the Hessian (good to check errors)

        OUTPUT:

           ([R,vR,vT,z,vz,phi], [N,6] array

            d[R,vR,vT,z,vz,phi]/d[J,angle], --> (N,6,6) array

            dO/dJ, --> (3,3) array

            Omegar,Omegaphi,Omegaz, [N] arrays

            Autofit error message)

        HISTORY:

           2016-07-19 - Written - Bovy (UofT)

        """
        out= actionAngleTorus_c.actionAngleTorus_jacobian_c(\
            self._pot,
            jr,jphi,jz,
            angler,anglephi,anglez,
            tol=kwargs.get('tol',self._tol),
            dJ=kwargs.get('dJ',self._dJ))
        if out[11] != 0:
            warnings.warn("actionAngleTorus' AutoFit exited with non-zero return status %i: %s" % (out[11],_autofit_errvals[out[11]]),
                          galpyWarning)
        # Re-arrange actions,angles to r,phi,z (C code orders them r,z,phi)
        out[6][:,:,:]= out[6][:,:,[0,2,1,3,5,4]]
        out[7][:,:]= out[7][:,[0,2,1]]
        out[7][:,:]= out[7][[0,2,1]]
        # Re-arrange x,v to R,vR,vT,z,vz,phi
        out[6][:,:]= out[6][:,[0,3,5,1,4,2]]
        if not kwargs.get('nosym',False):
            # explicitly symmetrize
            out[7][:]= 0.5*(out[7]+out[7].T)
        return (numpy.array(out[:6]).T,out[6],out[7],
                out[8],out[9],out[10],out[11])
| bsd-3-clause | fa08b5737213eecad0ce3dce4223c3a5 | 29.225397 | 253 | 0.553093 | 3.509399 | false | false | false | false |
jobovy/galpy | galpy/potential/BurkertPotential.py | 1 | 4240 | ###############################################################################
# BurkertPotential.py: Potential with a Burkert density
###############################################################################
import numpy
from scipy import special
from ..util import conversion
from .SphericalPotential import SphericalPotential
class BurkertPotential(SphericalPotential):
    """BurkertPotential.py: Potential with a Burkert density

    .. math::

        \\rho(r) = \\frac{\\mathrm{amp}}{(1+r/a)\\,(1+[r/a]^2)}

    """
    def __init__(self,amp=1.,a=2.,normalize=False,
                 ro=None,vo=None):
        """
        NAME:

           __init__

        PURPOSE:

           initialize a Burkert-density potential

        INPUT:

           amp - amplitude to be applied to the potential (default: 1); can be a Quantity with units of mass density or Gxmass density

           a = scale radius (can be Quantity)

           normalize - if True, normalize such that vc(1.,0.)=1., or, if
                       given as a number, such that the force is this fraction
                       of the force necessary to make vc(1.,0.)=1.

           ro=, vo= distance and velocity scales for translation into internal units (default from configuration file)

        OUTPUT:

           (none)

        HISTORY:

           2013-04-10 - Written - Bovy (IAS)

           2020-03-30 - Re-implemented using SphericalPotential - Bovy (UofT)

        """
        SphericalPotential.__init__(self,amp=amp,ro=ro,vo=vo,
                                    amp_units='density')
        a= conversion.parse_length(a,ro=self._ro,vo=self._vo)
        self.a=a
        self._scale= self.a
        if normalize or \
                (isinstance(normalize,(int,float)) \
                     and not isinstance(normalize,bool)): #pragma: no cover
            self.normalize(normalize)
        # C implementations exist for the potential, its second derivatives,
        # and the density
        self.hasC= True
        self.hasC_dxdv= True
        self.hasC_dens= True
        return None

    def _revaluate(self,r,t=0.):
        """Potential as a function of r and time"""
        x= r/self.a  # dimensionless radius
        return -self.a**2.*numpy.pi*(-numpy.pi/x+2.*(1./x+1)*numpy.arctan(1/x)
                                      +(1./x+1)*numpy.log((1.+1./x)**2./(1.+1/x**2.))
                                      +special.xlogy(2./x,1.+x**2.))
        #Previous way, not stable as r -> infty
        #return -self.a**2.*numpy.pi/x*(-numpy.pi+2.*(1.+x)*numpy.arctan(1/x)
        #                                +2.*(1.+x)*numpy.log(1.+x)
        #                                +(1.-x)*numpy.log(1.+x**2.))

    def _rforce(self,r,t=0.):
        x= r/self.a
        return self.a*numpy.pi/x**2.*(numpy.pi-2.*numpy.arctan(1./x)
                                      -2.*numpy.log(1.+x)-numpy.log(1.+x**2.))

    def _r2deriv(self,r,t=0.):
        x= r/self.a
        # d2Phi/dr2 from the density and the radial force (Poisson equation)
        return 4.*numpy.pi/(1.+x**2.)/(1.+x)+2.*self._rforce(r)/x/self.a

    def _rdens(self,r,t=0.):
        x= r/self.a
        return 1./(1.+x)/(1.+x**2.)

    def _surfdens(self,R,z,phi=0.,t=0.):
        """
        NAME:

           _surfdens

        PURPOSE:

           evaluate the surface density for this potential

        INPUT:

           R - Galactocentric cylindrical radius

           z - vertical height

           phi - azimuth

           t - time

        OUTPUT:

           the surface density

        HISTORY:

           2018-08-19 - Written - Bovy (UofT)

        """
        r= numpy.sqrt(R**2.+z**2.)
        x= r/self.a
        Rpa= numpy.sqrt(R**2.+self.a**2.)
        # +0j forces a complex sqrt so R < a does not produce NaNs; the
        # result is real after taking .real below
        Rma= numpy.sqrt(R**2.-self.a**2.+0j)
        if Rma == 0:
            # Special case R == a, where the general expression below
            # would divide by zero
            za= z/self.a
            return self.a**2./2.*((2.-2.*numpy.sqrt(za**2.+1)
                                   +numpy.sqrt(2.)*za\
                                       *numpy.arctan(za/numpy.sqrt(2.)))/z
                                  +numpy.sqrt(2*za**2.+2.)\
                                      *numpy.arctanh(za/numpy.sqrt(2.*(za**2.+1)))
                                  /numpy.sqrt(self.a**2.+z**2.))
        else:
            return self.a**2.*(numpy.arctan(z/x/Rma)/Rma
                               +numpy.arctanh(z/x/Rpa)/Rpa
                               -numpy.arctan(z/Rma)/Rma
                               +numpy.arctan(z/Rpa)/Rpa).real
| bsd-3-clause | e73e1f79b05d3ceb3ec2c867d6d9bc78 | 33.471545 | 134 | 0.461792 | 3.533333 | false | false | false | false |
jobovy/galpy | galpy/actionAngle/actionAngleVertical.py | 1 | 6501 | ###############################################################################
# actionAngle: a Python module to calculate actions, angles, and frequencies
#
# class: actionAngleVertical
#
# methods:
# Jz
# anglez
# Tz
# calczmax
# calcEz
###############################################################################
import numpy
from scipy import integrate, optimize
from ..potential.linearPotential import evaluatelinearPotentials
from .actionAngle import actionAngle
class actionAngleVertical(actionAngle):
    """Action-angle formalism for vertical integral using the adiabatic approximation"""
    def __init__(self,*args,**kwargs):
        """
        NAME:

           __init__

        PURPOSE:

           initialize an actionAngleVertical object

        INPUT:

           Either:

              a) z,vz

              b) Orbit instance: initial condition used if that's it, orbit(t)
                 if there is a time given as well

           pot= potential or list of potentials (planarPotentials)

        OUTPUT:

        HISTORY:

           2012-06-01 - Written - Bovy (IAS)

        """
        self._parse_eval_args(*args,_noOrbUnitsCheck=True,**kwargs)
        self._z= self._eval_z
        self._vz= self._eval_vz
        if 'pot' not in kwargs: #pragma: no cover
            raise OSError("Must specify pot= for actionAngleVertical")
        self._verticalpot= kwargs['pot']
        return None

    def Jz(self,**kwargs):
        """
        NAME:

           Jz

        PURPOSE:

           Calculate the vertical action

        INPUT:

           +scipy.integrate.quad keywords

        OUTPUT:

           J_z(z,vz)/ro/vc + estimate of the error

        HISTORY:

           2012-06-01 - Written - Bovy (IAS)

        """
        # Cached from a previous call
        if hasattr(self,'_Jz'):
            return self._Jz
        zmax= self.calczmax()
        # -9999.99 is the sentinel for a failed zmax determination
        if zmax == -9999.99: return numpy.array([9999.99,numpy.nan])
        Ez= calcEz(self._z,self._vz,self._verticalpot)
        # NOTE(review): quad's error estimate is discarded by [0] here, even
        # though the docstring (and the failure path above) promise a
        # (value,error) pair -- confirm intended return type
        self._Jz= 2.*integrate.quad(_JzIntegrand,0.,zmax,
                                    args=(Ez,self._verticalpot),
                                    **kwargs)[0]/numpy.pi
        return self._Jz

    def Tz(self,**kwargs): #pragma: no cover
        """
        NAME:

           Tz

        PURPOSE:

           Calculate the vertical period

        INPUT:

           +scipy.integrate.quad keywords

        OUTPUT:

           T_z(z,vz)*vc/ro + estimate of the error

        HISTORY:

           2012-06-01 - Written - Bovy (IAS)

        """
        if hasattr(self,'_Tz'):
            return self._Tz
        zmax= self.calczmax()
        Ez= calcEz(self._z,self._vz,self._verticalpot)
        # NOTE(review): returns a scalar, but anglez below indexes the
        # result with Tz[0] -- confirm whether this should keep quad's
        # (value,error) pair as an array
        self._Tz= 4.*integrate.quad(_TzIntegrand,0.,zmax,
                                    args=(Ez,self._verticalpot),
                                    **kwargs)[0]
        return self._Tz

    def anglez(self,**kwargs): #pragma: no cover
        """
        NAME:

           anglez

        PURPOSE:

           Calculate the vertical angle

        INPUT:

           +scipy.integrate.quad keywords

        OUTPUT:

           angle_z(z,vz)*vc/ro + estimate of the error

        HISTORY:

           2012-06-01 - Written - Bovy (IAS)

        """
        if hasattr(self,'_anglez'):
            return self._anglez
        zmax= self.calczmax()
        Ez= calcEz(self._z,self._vz,self._verticalpot)
        Tz= self.Tz(**kwargs)
        self._anglez= 2.*numpy.pi*(numpy.array(integrate.quad(_TzIntegrand,0.,numpy.fabs(self._z),
                                                              args=(Ez,self._verticalpot),
                                                              **kwargs)))/Tz[0]
        # Map the angle into the correct quadrant based on the signs of
        # z and vz
        if self._z >= 0. and self._vz >= 0.:
            pass
        elif self._z >= 0. and self._vz < 0.:
            self._anglez[0]= numpy.pi-self._anglez[0]
        elif self._z < 0. and self._vz <= 0.:
            self._anglez[0]= numpy.pi+self._anglez[0]
        else:
            self._anglez[0]= 2.*numpy.pi-self._anglez[0]
        return self._anglez

    def calczmax(self):
        """
        NAME:

           calczmax

        PURPOSE:

           calculate the maximum height

        INPUT:

        OUTPUT:

           zmax

        HISTORY:

           2012-06-01 - Written - Bovy (IAS)

        """
        if hasattr(self,'_zmax'): #pragma: no cover
            return self._zmax
        Ez= calcEz(self._z,self._vz,self._verticalpot)
        if self._vz == 0.: #We are exactly at the maximum height
            zmax= numpy.fabs(self._z)
        else:
            # Bracket the root of Ez - Phi(z) = 0 and solve with brentq;
            # -9999.99 signals that no bracketing end point was found
            zstart= self._z
            try:
                zend= _zmaxFindStart(self._z,Ez,self._verticalpot)
            except OverflowError: #pragma: no cover
                zmax= -9999.99
            else:
                zmax= optimize.brentq(_zmaxEq,zstart,zend,
                                      (Ez,self._verticalpot))
        self._zmax= zmax
        return self._zmax
def _zmaxEq(z,Ez,pot):
    """The vz=0 equation that needs to be solved to find zmax"""
    # At z = zmax all vertical energy is potential energy: Ez - Phi(z) = 0
    return Ez-potentialVertical(z,pot)
def calcEz(z,vz,pot):
    """
    NAME:
       calcEz
    PURPOSE:
       calculate the vertical energy
    INPUT:
       z - height (/ro)
       vz - vertical part of the velocity (/vc)
       pot - potential
    OUTPUT:
       Ez
    HISTORY:
       2012-06-01 - Written - Bovy (IAS)
    """
    # kinetic energy in the vertical direction plus the vertical potential
    kinetic= vz**2./2.
    return kinetic+potentialVertical(z,pot)
def potentialVertical(z,pot):
    """
    NAME:
       potentialVertical
    PURPOSE:
       return the potential
    INPUT:
       z - height (/ro)
       pot - potential
    OUTPUT:
       Phi_z(z)
    HISTORY:
       2012-06-01 - Written - Bovy (IAS)
    """
    # Thin wrapper; use_physical=False keeps the result in internal units
    return evaluatelinearPotentials(pot,z,use_physical=False)
def _JzIntegrand(z,Ez,pot):
    """The J_z integrand"""
    # vz^2 at height z from energy conservation; its square root is |vz|
    vz2= 2.*(Ez-potentialVertical(z,pot))
    return numpy.sqrt(vz2)
def _TzIntegrand(z,Ez,pot): #pragma: no cover
    """The T_z integrand"""
    # 1/|vz|: time spent per unit height at height z
    return 1./_JzIntegrand(z,Ez,pot)
def _zmaxFindStart(z,Ez,pot):
    """
    NAME:
       _zmaxFindStart
    PURPOSE:
       Find adequate end point to solve for zmax
    INPUT:
       z - height
       Ez - vertical energy
       pot - potential
    OUTPUT:
       zend
    HISTORY:
       2012-06-01 - Written - Bovy (IAS)
    """
    # Start from twice the current height (or a small value at z=0) and keep
    # doubling until the potential exceeds the vertical energy, so that the
    # returned point brackets zmax from above
    ztry= 0.00001 if z == 0. else 2.*numpy.fabs(z)
    while (Ez-potentialVertical(ztry,pot)) > 0.:
        ztry*= 2.
        if ztry > 100.: #pragma: no cover
            raise OverflowError
    return ztry
| bsd-3-clause | cb6c35370fa939c39e03c25236fa4397 | 28.958525 | 98 | 0.50423 | 3.817381 | false | false | false | false |
jobovy/galpy | galpy/potential/AnyAxisymmetricRazorThinDiskPotential.py | 1 | 9827 | ###############################################################################
# AnyAxisymmetricRazorThinDiskPotential.py: class that implements the
# potential of an arbitrary
# axisymmetric, razor-thin disk
###############################################################################
import numpy
from scipy import integrate, special
from ..util import conversion
from ..util._optional_deps import _APY_LOADED
from .Potential import Potential, check_potential_inputs_not_arrays
if _APY_LOADED:
from astropy import units
class AnyAxisymmetricRazorThinDiskPotential(Potential):
r"""Class that implements the potential of an arbitrary axisymmetric, razor-thin disk with surface density :math:`\Sigma(R)`"""
def __init__(self,surfdens=lambda R: 1.5*numpy.exp(-3.*R),amp=1.,
normalize=False,ro=None,vo=None):
"""
NAME:
__init__
PURPOSE:
Initialize the potential of an arbitrary axisymmetric disk
INPUT:
surfdens= (1.5 e^[-R/0.3]) function of a single variable that gives the surface density as a function of radius (can return a Quantity)
amp= (1.) amplitude to be applied to the potential
normalize - if True, normalize such that vc(1.,0.)=1., or, if given as a number, such that the force is this fraction of the force necessary to make vc(1.,0.)=1.
ro=, vo= distance and velocity scales for translation into internal units (default from configuration file)
OUTPUT:
AnyAxisymmetricRazorThinDiskPotential object
HISTORY:
2021-01-04 - Written - Bovy (UofT)
"""
Potential.__init__(self,amp=amp,ro=ro,vo=vo)
# Parse surface density: does it have units? does it expect them?
if _APY_LOADED:
_sdens_unit_input= False
try:
surfdens(1)
except (units.UnitConversionError,units.UnitTypeError):
_sdens_unit_input= True
_sdens_unit_output= False
if _sdens_unit_input:
try:
surfdens(1.*units.kpc).to(units.Msun/units.pc**2)
except (AttributeError,units.UnitConversionError): pass
else: _sdens_unit_output= True
else:
try:
surfdens(1.).to(units.Msun/units.pc**2)
except (AttributeError,units.UnitConversionError): pass
else: _sdens_unit_output= True
if _sdens_unit_input and _sdens_unit_output:
self._sdens= lambda R: conversion.parse_surfdens(\
surfdens(R*self._ro*units.kpc),
ro=self._ro,vo=self._vo)
elif _sdens_unit_input:
self._sdens= lambda R: surfdens(R*self._ro*units.kpc)
elif _sdens_unit_output:
self._sdens= lambda R: conversion.parse_surfdens(surfdens(R),
ro=self._ro,
vo=self._vo)
if not hasattr(self,'_sdens'): # unitless
self._sdens= surfdens
# The potential at zero, in case it's asked for
self._pot_zero= -2.*numpy.pi*integrate.quad(lambda a: self._sdens(a),
0,numpy.inf)[0]
if normalize or \
(isinstance(normalize,(int,float)) \
and not isinstance(normalize,bool)): #pragma: no cover
self.normalize(normalize)
@check_potential_inputs_not_arrays
def _evaluate(self,R,z,phi=0.,t=0.):
"""
NAME:
_evaluate
PURPOSE:
evaluate the potential at (R,z)
INPUT:
R - Cylindrical Galactocentric radius
z - vertical height
phi - azimuth
t - time
OUTPUT:
potential at (R,z)
HISTORY:
2021-01-04 - Written - Bovy (UofT)
"""
if R == 0 and z == 0:
return self._pot_zero
elif numpy.isinf(R**2+z**2):
return 0.
potint= lambda a: a*self._sdens(a)\
/numpy.sqrt((R+a)**2.+z**2.)*special.ellipk(4*R*a/((R+a)**2.+z**2.))
return -4*(integrate.quad(potint,0,2*R,points=[R])[0]
+integrate.quad(potint,2*R,numpy.inf)[0])
@check_potential_inputs_not_arrays
def _Rforce(self,R,z,phi=0.,t=0.):
"""
NAME:
_Rforce
PURPOSE:
evaluate the radial force at (R,z)
INPUT:
R - Cylindrical Galactocentric radius
z - vertical height
phi - azimuth
t - time
OUTPUT:
F_R at (R,z)
HISTORY:
2021-01-04 - Written - Bovy (UofT)
"""
R2= R**2
z2= z**2
def rforceint(a):
a2= a**2
aRz= (a+R)**2.+z2
faRoveraRz= 4*a*R/aRz
return a*self._sdens(a)\
*((a2-R2+z2)*special.ellipe(faRoveraRz)
-((a-R)**2+z2)*special.ellipk(faRoveraRz))\
/R/((a-R)**2+z2)/numpy.sqrt(aRz)
return 2*(integrate.quad(rforceint,0,2*R,points=[R])[0]
+integrate.quad(rforceint,2*R,numpy.inf)[0])
@check_potential_inputs_not_arrays
def _zforce(self,R,z,phi=0.,t=0.):
"""
NAME:
_zforce
PURPOSE:
evaluate the vertical force at (R,z)
INPUT:
R - Cylindrical Galactocentric radius
z - vertical height
phi - azimuth
t - time
OUTPUT:
F_z at (R,z)
HISTORY:
2021-01-04 - Written - Bovy (UofT)
"""
if z == 0:
return 0.
z2= z**2
def zforceint(a):
aRz= (a+R)**2.+z2
faRoveraRz= 4*a*R/aRz
return a*self._sdens(a)\
*special.ellipe(faRoveraRz)/((a-R)**2+z2)/numpy.sqrt(aRz)
return -4*z*(integrate.quad(zforceint,0,2*R,points=[R])[0]
+integrate.quad(zforceint,2*R,numpy.inf)[0])
@check_potential_inputs_not_arrays
def _R2deriv(self,R,z,phi=0.,t=0.):
"""
NAME:
_R2deriv
PURPOSE:
evaluate the 2nd radial derivative at (R,z)
INPUT:
R - Cylindrical Galactocentric radius
z - vertical height
phi - azimuth
t - time
OUTPUT:
d2 Phi / dR2 at (R,z)
HISTORY:
2021-01-04 - Written - Bovy (UofT)
"""
R2= R**2
z2= z**2
def r2derivint(a):
a2= a**2
aRz= (a+R)**2.+z2
faRoveraRz= 4*a*R/aRz
return a*self._sdens(a)\
*(-(((a2-3.*R2)*(a2-R2)**2+(3.*a2**2+2.*a2*R2+3.*R2**2)*z2
+(3.*a2+7.*R2)*z**4+z**6)*special.ellipe(faRoveraRz))
+((a-R)**2+z2)*((a2-R2)**2+2.*(a2+2.*R2)*z2+z**4)
*special.ellipk(faRoveraRz))\
/(2.*R2*((a-R)**2+z2)**2*((a+R)**2+z2)**1.5)
return -4*(integrate.quad(r2derivint,0,2*R,points=[R])[0]
+integrate.quad(r2derivint,2*R,numpy.inf)[0])
    @check_potential_inputs_not_arrays
    def _z2deriv(self,R,z,phi=0.,t=0.):
        """
        NAME:
           _z2deriv
        PURPOSE:
           evaluate the 2nd vertical derivative at (R,z)
        INPUT:
           R - Cylindrical Galactocentric radius
           z - vertical height
           phi - azimuth
           t - time
        OUTPUT:
           d2 Phi / dz2 at (R,z)
        HISTORY:
           2021-01-04 - Written - Bovy (UofT)
        """
        R2= R**2
        z2= z**2
        # Integrand over ring radius a; complete elliptic integrals E and K
        # with parameter m= 4aR/[(a+R)^2+z^2]
        def z2derivint(a):
            a2= a**2
            aRz= (a+R)**2.+z2
            faRoveraRz= 4*a*R/aRz
            return a*self._sdens(a)\
                *(-(((a2-R2)**2-2.*(a2+R2)*z2-3.*z**4)*special.ellipe(faRoveraRz))
                  -z2*((a-R)**2+z2)*special.ellipk(faRoveraRz))\
                  /(((a-R)**2+z2)**2*((a+R)**2+z2)**1.5)
        # Split at a=2R and flag a=R for the quadrature
        return -4*(integrate.quad(z2derivint,0,2*R,points=[R])[0]
                   +integrate.quad(z2derivint,2*R,numpy.inf)[0])
    @check_potential_inputs_not_arrays
    def _Rzderiv(self,R,z,phi=0.,t=0.):
        """
        NAME:
           _Rzderiv
        PURPOSE:
           evaluate the mixed radial, vertical derivative at (R,z)
        INPUT:
           R - Cylindrical Galactocentric radius
           z - vertical height
           phi - azimuth
           t - time
        OUTPUT:
           d2 Phi / dRdz at (R,z)
        HISTORY:
           2021-01-04 - Written - Bovy (UofT)
        """
        R2= R**2
        z2= z**2
        # Integrand over ring radius a; complete elliptic integrals E and K
        # with parameter m= 4aR/[(a+R)^2+z^2]
        def rzderivint(a):
            a2= a**2
            aRz= (a+R)**2.+z2
            faRoveraRz= 4*a*R/aRz
            return a*self._sdens(a)\
                *(-((a**4-7.*R**4-6.*R2*z2+z**4+2.*a2*(3.*R2+z2))
                    *special.ellipe(faRoveraRz))
                  +((a-R)**2+z**2)*(a2-R2+z2)*special.ellipk(faRoveraRz))\
                  /R/((a-R)**2+z2)**2/((a+R)**2+z2)**1.5
        # Split at a=2R and flag a=R for the quadrature; overall factor is
        # odd in z, as required for a mixed R,z derivative
        return -2*z*(integrate.quad(rzderivint,0,2*R,points=[R])[0]
                     +integrate.quad(rzderivint,2*R,numpy.inf)[0])
def _surfdens(self,R,z,phi=0.,t=0.):
"""
NAME:
_surfdens
PURPOSE:
evaluate the surface density
INPUT:
R - Cylindrical Galactocentric radius
z - vertical height
phi - azimuth
t - time
OUTPUT:
Sigma (R,z)
HISTORY:
2021-01-04 - Written - Bovy (UofT)
"""
return self._sdens(R)
| bsd-3-clause | 05cfffbde9eef66387875a741ecdf59e | 34.222222 | 172 | 0.479597 | 3.352781 | false | false | false | false |
jobovy/galpy | galpy/potential/Cautun20.py | 1 | 3326 | # Cautun (2020) potential
# Thanks to Thomas Callingham (Durham University, UK) which implemented the potential within galpy
import numpy
from ..potential import (AdiabaticContractionWrapperPotential,
DiskSCFPotential, NFWPotential,
PowerSphericalPotentialwCutoff, SCFPotential,
mwpot_helpers, scf_compute_coeffs_axi)
from ..util import conversion
# Suppress the numpy floating-point warnings that this code generates...
old_error_settings= numpy.seterr(all='ignore')
# Unit normalizations
ro= 8.122  # distance normalization (kpc; galpy internal-unit scale)
vo= 229  # velocity normalization (km/s)
sigo = conversion.surfdens_in_msolpc2(vo=vo,ro=ro)  # Msun/pc^2 per internal surface-density unit
rhoo = conversion.dens_in_msolpc3(vo=vo,ro=ro)  # Msun/pc^3 per internal density unit
#Cautun DM halo
fb = 4.825 / 30.7 # Planck 1 baryon fraction
m200 = 0.969e12 # the DM halo mass
conc = 8.76  # NFW concentration (used in NFWPotential below)
#Cautun Bulge
r0_bulge = 0.075/ro
rcut_bulge= 2.1/ro
rho0_bulge= 103/rhoo
#Cautun Stellar Discs
zd_thin = 0.3/ro
Rd_thin =2.63/ro
Sigma0_thin= 731./sigo
zd_thick = 0.9/ro
Rd_thick = 3.80/ro
Sigma0_thick= 101./sigo
#Cautun Gas Discs
Rd_HI= 7./ro
Rm_HI= 4./ro
zd_HI= 0.085/ro
Sigma0_HI= 53/sigo
Rd_H2= 1.5/ro
Rm_H2= 12./ro
zd_H2= 0.045/ro
Sigma0_H2= 2200/sigo
# Cautun CGM
A = 0.19
Beta = -1.46
critz0 = 127.5e-9/rhoo  # critical density at z=0 in internal units
R200 = 219/ro #R200 for cgm
cgm_amp = 200 * critz0 * A * fb
def gas_dens(R,z):
    """Total gas-disc density at (R,z): HI plus molecular (H2) components,
    each an exponential disc with a central hole and a sech^2 vertical
    profile."""
    hi= mwpot_helpers.expsech2_dens_with_hole(R,z,Rd_HI,Rm_HI,zd_HI,Sigma0_HI)
    h2= mwpot_helpers.expsech2_dens_with_hole(R,z,Rd_H2,Rm_H2,zd_H2,Sigma0_H2)
    return hi+h2
def stellar_dens(R,z):
    """Total stellar-disc density at (R,z): thin plus thick
    double-exponential discs."""
    thin= mwpot_helpers.expexp_dens(R,z,Rd_thin,zd_thin,Sigma0_thin)
    thick= mwpot_helpers.expexp_dens(R,z,Rd_thick,zd_thick,Sigma0_thick)
    return thin+thick
def bulge_dens(R,z):
    """Bulge density at (R,z): cored power-law profile with a cut-off
    (positional arguments of core_pow_dens_with_cut; see mwpot_helpers)."""
    profile_args= (1.8,r0_bulge,rcut_bulge,rho0_bulge,0.5)
    return mwpot_helpers.core_pow_dens_with_cut(R,z,*profile_args)
#dicts used in DiskSCFPotential
# Approximate radial (Sigma) and vertical (hz) profiles of the four discs,
# in the same order: HI, H2, thin stellar, thick stellar
sigmadict = [{'type':'exp','h':Rd_HI,'amp':Sigma0_HI, 'Rhole':Rm_HI},
             {'type':'exp','h':Rd_H2,'amp':Sigma0_H2, 'Rhole':Rm_H2},
             {'type':'exp','h':Rd_thin,'amp':Sigma0_thin, 'Rhole':0.},
             {'type':'exp','h':Rd_thick,'amp':Sigma0_thick, 'Rhole':0.}]
hzdict = [{'type':'sech2', 'h':zd_HI},
          {'type':'sech2', 'h':zd_H2},
          {'type':'exp', 'h':zd_thin},
          {'type':'exp', 'h':zd_thick}]
#generate separate disk and halo potential - and combined potential
# Bulge: basis-function (SCF) expansion of the bulge density
Cautun_bulge= SCFPotential(\
    Acos=scf_compute_coeffs_axi(bulge_dens,20,10,a=0.1)[0],
    a=0.1,ro=ro,vo=vo)
# CGM: power-law density with an exponential cut-off
Cautun_cgm= PowerSphericalPotentialwCutoff(amp=cgm_amp,alpha=-Beta,
                         r1=R200,rc=2.*R200,ro=ro,vo=vo)
# Discs: disk-SCF approximation to the combined gas+stellar density
Cautun_disk= DiskSCFPotential(\
    dens=lambda R,z: gas_dens(R,z) + stellar_dens(R,z),
    Sigma=sigmadict,hz=hzdict,a=2.5,N=30,L=30,ro=ro,vo=vo)
# DM halo: NFW profile adiabatically contracted in response to the baryons
Cautun_halo= AdiabaticContractionWrapperPotential(\
    pot=NFWPotential(conc=conc,mvir=m200/1.e12,
                     vo=vo,ro=ro,H=67.77,Om=0.307,
                     overdens=200.0*(1.-fb),wrtcrit=True),
    baryonpot=Cautun_bulge+Cautun_cgm+Cautun_disk,
    f_bar=fb,method='cautun',ro=ro,vo=vo)
Cautun20= Cautun_halo+Cautun_disk+Cautun_bulge+Cautun_cgm
# Go back to old floating-point warnings settings
numpy.seterr(**old_error_settings)
| bsd-3-clause | a95fa0973e4e0e70a9e80bac97400989 | 35.955556 | 98 | 0.615153 | 2.493253 | false | false | false | false |
jobovy/galpy | galpy/potential/McMillan17.py | 1 | 2549 | # McMillan (2017) potential as first implemented in the galpy framework by
# Mackereth & Bovy (2018)
import numpy
from ..potential import (DiskSCFPotential, NFWPotential, SCFPotential,
mwpot_helpers, scf_compute_coeffs_axi)
from ..util import conversion
# Suppress the numpy floating-point warnings that this code generates...
old_error_settings= numpy.seterr(all='ignore')
# Unit normalizations
ro= 8.21  # distance normalization (kpc; galpy internal-unit scale)
vo= 233.1  # velocity normalization (km/s)
sigo= conversion.surfdens_in_msolpc2(vo=vo,ro=ro)  # Msun/pc^2 per internal surface-density unit
rhoo= conversion.dens_in_msolpc3(vo=vo,ro=ro)  # Msun/pc^3 per internal density unit
#gas disk parameters (fixed in McMillan 2017...)
Rd_HI= 7./ro
Rm_HI= 4./ro
zd_HI= 0.085/ro
Sigma0_HI= 53.1/sigo
Rd_H2= 1.5/ro
Rm_H2= 12./ro
zd_H2= 0.045/ro
Sigma0_H2= 2180./sigo
#parameters of best-fitting model in McMillan (2017)
#stellar disks
Sigma0_thin= 896./sigo
Rd_thin= 2.5/ro
zd_thin= 0.3/ro
Sigma0_thick= 183./sigo
Rd_thick= 3.02/ro
zd_thick= 0.9/ro
#bulge
rho0_bulge= 98.4/rhoo
r0_bulge= 0.075/ro
rcut= 2.1/ro
#DM halo
rho0_halo= 0.00854/rhoo
rh= 19.6/ro
def gas_dens(R,z):
    """Total gas-disc density at (R,z): HI plus molecular (H2) components,
    each an exponential disc with a central hole and a sech^2 vertical
    profile."""
    hi= mwpot_helpers.expsech2_dens_with_hole(R,z,Rd_HI,Rm_HI,zd_HI,Sigma0_HI)
    h2= mwpot_helpers.expsech2_dens_with_hole(R,z,Rd_H2,Rm_H2,zd_H2,Sigma0_H2)
    return hi+h2
def stellar_dens(R,z):
    """Total stellar-disc density at (R,z): thin plus thick
    double-exponential discs."""
    thin= mwpot_helpers.expexp_dens(R,z,Rd_thin,zd_thin,Sigma0_thin)
    thick= mwpot_helpers.expexp_dens(R,z,Rd_thick,zd_thick,Sigma0_thick)
    return thin+thick
def bulge_dens(R,z):
    """Bulge density at (R,z): cored power-law profile with a cut-off
    (positional arguments of core_pow_dens_with_cut; see mwpot_helpers)."""
    profile_args= (1.8,r0_bulge,rcut,rho0_bulge,0.5)
    return mwpot_helpers.core_pow_dens_with_cut(R,z,*profile_args)
#dicts used in DiskSCFPotential
# Approximate radial (Sigma) and vertical (hz) profiles of the four discs,
# in the same order: HI, H2, thin stellar, thick stellar
sigmadict = [{'type':'exp','h':Rd_HI,'amp':Sigma0_HI, 'Rhole':Rm_HI},
             {'type':'exp','h':Rd_H2,'amp':Sigma0_H2, 'Rhole':Rm_H2},
             {'type':'exp','h':Rd_thin,'amp':Sigma0_thin},
             {'type':'exp','h':Rd_thick,'amp':Sigma0_thick}]
hzdict = [{'type':'sech2', 'h':zd_HI},
          {'type':'sech2', 'h':zd_H2},
          {'type':'exp', 'h':zd_thin},
          {'type':'exp', 'h':zd_thick}]
#generate separate disk and halo potential - and combined potential
# Bulge: basis-function (SCF) expansion of the bulge density
McMillan_bulge= SCFPotential(\
    Acos=scf_compute_coeffs_axi(bulge_dens,20,10,a=0.1)[0],
    a=0.1,ro=ro,vo=vo)
# Discs: disk-SCF approximation to the combined gas+stellar density
McMillan_disk= DiskSCFPotential(\
    dens=lambda R,z: gas_dens(R,z)+stellar_dens(R,z),
    Sigma=sigmadict,hz=hzdict,a=2.5,N=30,L=30,ro=ro,vo=vo)
# DM halo: NFW with amplitude set by the central density and scale radius
McMillan_halo= NFWPotential(amp=rho0_halo*(4*numpy.pi*rh**3),
                            a=rh,ro=ro,vo=vo)
# Go back to old floating-point warnings settings
numpy.seterr(**old_error_settings)
McMillan17= McMillan_disk+McMillan_halo+McMillan_bulge
| bsd-3-clause | d65cc1c04cb8c830e93daa3d898ead49 | 32.539474 | 79 | 0.652413 | 2.334249 | false | false | false | false |
jobovy/galpy | galpy/potential/RingPotential.py | 1 | 6425 | ###############################################################################
# RingPotential.py: The gravitational potential of a thin, circular ring
###############################################################################
import numpy
from scipy import special
from ..util import conversion
from .Potential import Potential
class RingPotential(Potential):
    """Class that implements the potential of an infinitesimally-thin, circular ring
    .. math::
        \\rho(R,z) = \\frac{\\mathrm{amp}}{2\\pi\\,R_0}\\,\\delta(R-R_0)\\,\\delta(z)
    with :math:`\\mathrm{amp} = GM` the mass of the ring.
    All force/derivative expressions below are in terms of the complete
    elliptic integrals K(m) and E(m) (scipy convention, parameter m) with
    m = 4 a R / [(R+a)^2 + z^2].
    """
    def __init__(self,amp=1.,a=0.75,normalize=False,ro=None,vo=None):
        """
        NAME:
           __init__
        PURPOSE:
           initialize a circular ring potential
        INPUT:
           amp - mass of the ring (default: 1); can be a Quantity with units of mass or Gxmass
           a= (0.75) radius of the ring (can be Quantity)
           normalize - if True, normalize such that vc(1.,0.)=1., or, if given as a number, such that the force is this fraction of the force necessary to make vc(1.,0.)=1.; note that because the force is always positive at r < a, this does not work if a > 1
           ro=, vo= distance and velocity scales for translation into internal units (default from configuration file)
        OUTPUT:
           (none)
        HISTORY:
           2018-08-04 - Written - Bovy (UofT)
        """
        Potential.__init__(self,amp=amp,ro=ro,vo=vo,amp_units='mass')
        a= conversion.parse_length(a,ro=self._ro)
        self.a= a  # ring radius in internal units
        self.a2= self.a**2  # cached a^2
        # Internally store amp = GM/(2 pi a), the factor appearing in the
        # ring's surface density
        self._amp/= 2.*numpy.pi*self.a
        if normalize or \
                (isinstance(normalize,(int,float)) \
                     and not isinstance(normalize,bool)):
            # At r=1 < a the radial force points outward, so normalization
            # to vc(1,0)=1 is impossible
            if self.a > 1.:
                raise ValueError('RingPotential with normalize= for a > 1 is not supported (because the force is always positive at r=1)')
            self.normalize(normalize)
        self.hasC= False
        self.hasC_dxdv= False
    def _evaluate(self,R,z,phi=0.,t=0.):
        """
        NAME:
           _evaluate
        PURPOSE:
           evaluate the potential at R,z
        INPUT:
           R - Galactocentric cylindrical radius
           z - vertical height
           phi - azimuth
           t - time
        OUTPUT:
           Phi(R,z)
        HISTORY:
           2018-08-04 - Written - Bovy (UofT)
        """
        # Stable as r -> infty
        m= 4.*self.a/((numpy.sqrt(R)+self.a/numpy.sqrt(R))**2+z**2/R)
        return -4.*self.a/numpy.sqrt((R+self.a)**2+z**2)*special.ellipk(m)
    def _Rforce(self,R,z,phi=0.,t=0.):
        """
        NAME:
           _Rforce
        PURPOSE:
           evaluate the radial force for this potential
        INPUT:
           R - Galactocentric cylindrical radius
           z - vertical height
           phi - azimuth
           t - time
        OUTPUT:
           the radial force
        HISTORY:
           2018-08-04 - Written - Bovy (UofT)
        """
        m= 4.*R*self.a/((R+self.a)**2+z**2)
        return -2.*self.a/R/numpy.sqrt((R+self.a)**2+z**2)\
            *(m*(R**2-self.a2-z**2)/4./(1.-m)/self.a/R*special.ellipe(m)
              +special.ellipk(m))
    def _zforce(self,R,z,phi=0.,t=0.):
        """
        NAME:
           _zforce
        PURPOSE:
           evaluate the vertical force for this potential
        INPUT:
           R - Galactocentric cylindrical radius
           z - vertical height
           phi - azimuth
           t - time
        OUTPUT:
           the vertical force
        HISTORY:
           2018-08-04 - Written - Bovy (UofT)
        """
        m= 4.*R*self.a/((R+self.a)**2+z**2)
        # Odd in z, as required by the ring's mid-plane symmetry
        return -4.*z*self.a/(1.-m)*((R+self.a)**2+z**2)**-1.5*special.ellipe(m)
    def _R2deriv(self,R,z,phi=0.,t=0.):
        """
        NAME:
           _R2deriv
        PURPOSE:
           evaluate the second radial derivative for this potential
        INPUT:
           R - Galactocentric cylindrical radius
           z - vertical height
           phi - azimuth
           t - time
        OUTPUT:
           the second radial derivative
        HISTORY:
           2018-08-04 - Written - Bovy (UofT)
        """
        Raz2= (R+self.a)**2+z**2
        Raz= numpy.sqrt(Raz2)
        m= 4.*R*self.a/Raz2
        # Shorthand for (R^2-a^2-z^2)/[4 a R (1-m)], reused below
        R2ma2mz2o4aR1m= (R**2-self.a2-z**2)/4./self.a/R/(1.-m)
        return (2*R**2+self.a2+3*R*self.a+z**2)/R/Raz2*self._Rforce(R,z)\
            +2.*self.a/R/Raz*(m*(R**2+self.a2+z**2)/4./(1.-m)/self.a/R**2\
                                  *special.ellipe(m)\
              +(R2ma2mz2o4aR1m/(1.-m)*special.ellipe(m)
                +0.5*R2ma2mz2o4aR1m*(special.ellipe(m)-special.ellipk(m))
                +0.5*(special.ellipe(m)/(1.-m)-special.ellipk(m))/m)\
                  *4*self.a*(self.a2+z**2-R**2)/Raz2**2)
    def _z2deriv(self,R,z,phi=0.,t=0.):
        """
        NAME:
           _z2deriv
        PURPOSE:
           evaluate the second vertical derivative for this potential
        INPUT:
           R - Galactocentric cylindrical radius
           z - vertical height
           phi - azimuth
           t - time
        OUTPUT:
           the second vertical derivative
        HISTORY:
           2018-08-04 - Written - Bovy (UofT)
        """
        Raz2= (R+self.a)**2+z**2
        m= 4.*R*self.a/Raz2
        # Explicitly swapped in zforce here, so the z/z can be cancelled
        # and z=0 is handled properly
        return -4.*(3.*z**2/Raz2-1.
                    +4.*((1.+m)/(1.-m)-special.ellipk(m)/special.ellipe(m))\
                        *self.a*R*z**2/Raz2**2/m)\
                        *self.a/(1.-m)*((R+self.a)**2+z**2)**-1.5*special.ellipe(m)
    def _Rzderiv(self,R,z,phi=0.,t=0.):
        """
        NAME:
           _Rzderiv
        PURPOSE:
           evaluate the mixed R,z derivative for this potential
        INPUT:
           R - Galactocentric cylindrical radius
           z - vertical height
           phi - azimuth
           t - time
        OUTPUT:
           d2phi/dR/dz
        HISTORY:
           2018-08-04 - Written - Bovy (UofT)
        """
        Raz2= (R+self.a)**2+z**2
        m= 4.*R*self.a/Raz2
        # Built on _zforce so the z-odd prefactor is shared
        return (3.*(R+self.a)/Raz2
                -2.*((1.+m)/(1.-m)-special.ellipk(m)/special.ellipe(m))\
                    *self.a*(self.a2+z**2-R**2)/Raz2**2/m)*self._zforce(R,z)
| bsd-3-clause | 5b5b89e65be86f23e220671d24260424 | 31.780612 | 258 | 0.494163 | 3.28141 | false | false | false | false |
jobovy/galpy | galpy/df/constantbetaHernquistdf.py | 1 | 3488 | # Class that implements the anisotropic spherical Hernquist DF with constant
# beta parameter
import numpy
import scipy.integrate
import scipy.special
from ..potential import HernquistPotential, evaluatePotentials
from ..util import conversion
from .constantbetadf import _constantbetadf
class constantbetaHernquistdf(_constantbetadf):
    """Class that implements the anisotropic spherical Hernquist DF with constant beta parameter"""
    def __init__(self,pot=None,beta=0,ro=None,vo=None):
        """
        NAME:
            __init__
        PURPOSE:
            Initialize a Hernquist DF with constant anisotropy
        INPUT:
            pot - Hernquist potential which determines the DF
            beta - anisotropy parameter
        OUTPUT:
            None
        HISTORY:
            2020-07-22 - Written - Lane (UofT)
        """
        # NOTE(review): assert is stripped under python -O; consider raising
        # a TypeError instead for robust input validation
        assert isinstance(pot,HernquistPotential),'pot= must be potential.HernquistPotential'
        _constantbetadf.__init__(self,pot=pot,beta=beta,ro=ro,vo=vo)
        # Central potential depth (>0) and potential at infinity
        self._psi0= -evaluatePotentials(self._pot,0,0,use_physical=False)
        self._potInf= 0.
        self._GMa = self._psi0*self._pot.a**2.
        # Final factor is mass to make the DF that of the mass density
        self._fEnorm= (2.**self._beta/(2.*numpy.pi)**2.5)\
                        *scipy.special.gamma(5.-2.*self._beta)\
                        /scipy.special.gamma(1.-self._beta)\
                        /scipy.special.gamma(3.5-self._beta)\
                        /self._GMa**(1.5-self._beta)\
                        *self._psi0*self._pot.a
    def fE(self,E):
        """
        NAME:
            fE
        PURPOSE
            Calculate the energy portion of a Hernquist distribution function
        INPUT:
            E - The energy (can be Quantity)
        OUTPUT:
            fE - The value of the energy portion of the DF
        HISTORY:
            2020-07-22 - Written
        """
        # Dimensionless binding energy in [0,1]
        Etilde= -conversion.parse_energy(E,vo=self._vo)/self._psi0
        # Handle potential E outside of bounds
        # NOTE(review): the masking below indexes Etilde, so it assumes E is
        # array-like whenever out-of-bounds values occur - confirm upstream
        Etilde_out = numpy.where(numpy.logical_or(Etilde<0,Etilde>1))[0]
        if len(Etilde_out)>0:
            # Dummy variable now and 0 later, prevents numerical issues?
            Etilde[Etilde_out]=0.5
        # First check algebraic solutions, all adjusted such that DF = mass den
        if self._beta == 0.: # isotropic case
            sqrtEtilde= numpy.sqrt(Etilde)
            fE= self._psi0*self._pot.a\
                /numpy.sqrt(2.)/(2*numpy.pi)**3/self._GMa**1.5\
                *sqrtEtilde/(1-Etilde)**2.\
                *((1.-2.*Etilde)*(8.*Etilde**2.-8.*Etilde-3.)\
                  +((3.*numpy.arcsin(sqrtEtilde))\
                    /numpy.sqrt(Etilde*(1.-Etilde))))
        elif self._beta == 0.5:
            # Algebraic special case beta = 1/2
            fE= (3.*Etilde**2.)/(4.*numpy.pi**3.*self._pot.a)
        elif self._beta == -0.5:
            # Algebraic special case beta = -1/2
            fE= ((20.*Etilde**3.-20.*Etilde**4.+6.*Etilde**5.)\
                 /(1.-Etilde)**4)/(4.*numpy.pi**3.*self._GMa*self._pot.a)
        else:
            # General constant-beta case via the hypergeometric function
            fE= self._fEnorm*numpy.power(Etilde,2.5-self._beta)*\
                scipy.special.hyp2f1(5.-2.*self._beta,1.-2.*self._beta,
                                     3.5-self._beta,Etilde)
        if len(Etilde_out) > 0:
            # Zero out the entries that were outside of the valid energy range
            fE[Etilde_out]= 0.
        return fE
    def _icmf(self,ms):
        '''Analytic expression for the normalized inverse cumulative mass
        function. The argument ms is normalized mass fraction [0,1]'''
        return self._pot.a*numpy.sqrt(ms)/(1-numpy.sqrt(ms))
| bsd-3-clause | a80bab8a8b6e416cc7c092f1c2b2dd5d | 32.219048 | 99 | 0.564507 | 3.450049 | false | false | false | false |
jobovy/galpy | galpy/util/ars.py | 1 | 14076 | #############################################################################
#Copyright (c) 2011, Jo Bovy
#All rights reserved.
#
#Redistribution and use in source and binary forms, with or without
#modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# The name of the author may not be used to endorse or promote products
# derived from this software without specific prior written permission.
#
#THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
#"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
#LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
#A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
#HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
#INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
#BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
#OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
#AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
#LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY
#WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
#POSSIBILITY OF SUCH DAMAGE.
#############################################################################
import numpy
import scipy.stats as stats
#TO DO:
#Throw errors in the sample_hull routine
def ars(domain,isDomainFinite,abcissae,hx,hpx,nsamples=1,
        hxparams=(),maxn=100):
    """ars: Implementation of the Adaptive-Rejection Sampling
    algorithm by Gilks & Wild (1992): Adaptive Rejection Sampling
    for Gibbs Sampling, Applied Statistics, 41, 337
    Based on Wild & Gilks (1993), Algorithm AS 287: Adaptive Rejection
    Sampling from Log-concave Density Functions, Applied Statistics, 42, 701
    Input:
       domain - [.,.] upper and lower limit to the domain
       isDomainFinite - [.,.] is there a lower/upper limit to the domain?
       abcissae - initial list of abcissae (must lie on either side of the peak in hx if the domain is unbounded
       hx - function that evaluates h(x) = ln g(x)
       hpx - function that evaluates hp(x) = d h(x) / d x
       nsamples - (optional) number of desired samples (default=1)
       hxparams - (optional) a tuple of parameters for h(x) and h'(x)
       maxn - (optional) maximum number of updates to the hull (default=100)
    Output:
       list with nsamples of samples from exp(h(x))
    History:
       2009-05-21 - Written - Bovy (NYU)
    """
    # Build the initial piecewise-exponential envelope (upper/lower hulls)
    hull= setup_hull(domain,isDomainFinite,abcissae,hx,hpx,hxparams)
    samples= []
    nupdates= 0
    # Draw one sample at a time; the hull is refined as rejections occur
    for _ in range(int(nsamples)):
        sample, hull, nupdates= sampleone(hull,hx,hpx,domain,
                                          isDomainFinite,maxn,
                                          nupdates,hxparams)
        samples.append(sample)
    return samples
def setup_hull(domain,isDomainFinite,abcissae,hx,hpx,hxparams):
    """setup_hull: set up the upper and lower hull and everything that
    comes with that
    Input:
       domain - [.,.] upper and lower limit to the domain
       isDomainFinite - [.,.] is there a lower/upper limit to the domain?
       abcissae - initial list of abcissae (must lie on either side
                  of the peak in hx if the domain is unbounded
       hx - function that evaluates h(x)
       hpx - function that evaluates hp(x)
       hxparams - tuple of parameters for h(x) and h'(x)
    Output:
       list with:
       [0]= c_u
       [1]= xs
       [2]= h(xs)
       [3]= hp(xs)
       [4]= zs
       [5]= s_cum
       [6]= hu(zi)
    History:
       2009-05-21 - Written - Bovy (NYU)
       (removed dead preallocations, vectorized the z/hu computations)
    """
    nx= len(abcissae)
    # Sort the abcissae and evaluate h and h' at each of them
    # (hx/hpx are scalar functions, so evaluate point by point)
    xs= numpy.sort(abcissae)
    hxs= numpy.array([hx(x,hxparams) for x in xs])
    hpxs= numpy.array([hpx(x,hxparams) for x in xs])
    #THERE IS NO CHECKING HERE TO SEE WHETHER IN THE INFINITE DOMAIN CASE
    #WE HAVE ABCISSAE ON BOTH SIDES OF THE PEAK
    # z_i: intersections of the tangent lines at x_i and x_{i+1}
    zs= (hxs[1:]-hxs[:-1]-xs[1:]*hpxs[1:]+xs[:-1]*hpxs[:-1])\
        /(hpxs[:-1]-hpxs[1:])
    # Upper hull evaluated at the z_i
    hus= hpxs[:-1]*(zs-xs[:-1])+hxs[:-1]
    # Unnormalized integrals of exp(hu) over each hull segment
    scum= numpy.zeros(nx-1)
    if isDomainFinite[0]:
        scum[0]= 1./hpxs[0]*(numpy.exp(hus[0])-numpy.exp(
            hpxs[0]*(domain[0]-xs[0])+hxs[0]))
    else:
        scum[0]= 1./hpxs[0]*numpy.exp(hus[0])
    if nx > 2:
        for jj in range(nx-2):
            if hpxs[jj+1] == 0.:
                # Flat tangent: the segment integral is just width x height
                scum[jj+1]= (zs[jj+1]-zs[jj])*numpy.exp(hxs[jj+1])
            else:
                scum[jj+1]= 1./hpxs[jj+1]*(numpy.exp(hus[jj+1])
                                           -numpy.exp(hus[jj]))
    # Contribution of the last (possibly semi-infinite) segment
    if isDomainFinite[1]:
        cu= 1./hpxs[nx-1]*(numpy.exp(hpxs[nx-1]*(
            domain[1]-xs[nx-1])+hxs[nx-1])-numpy.exp(hus[nx-2]))
    else:
        cu= -1./hpxs[nx-1]*numpy.exp(hus[nx-2])
    cu= cu+numpy.sum(scum)
    # Normalized cumulative distribution over the segments
    scum= numpy.cumsum(scum)/cu
    return [cu,xs,hxs,hpxs,zs,scum,hus]
def sampleone(hull,hx,hpx,domain,isDomainFinite,maxn,nupdates,hxparams):
    """sampleone: sample one point by ars
    Input:
       hull - the hull (see doc of setup_hull for definition)
       hx - function that evaluates h(x)
       hpx - function that evaluates hp(x)
       domain - [.,.] upper and lower limit to the domain
       isDomainFinite - [.,.] is there a lower/upper limit to the domain?
       maxn - maximum number of updates to the hull
       nupdates - number of updates to the hull that have occurred
       hxparams - tuple of parameters for h(x) and h'(x)
    Output:
       a sample
       a new hull
       nupdates
    History:
       2009-05-21 - Written - Bovy (NYU)
    """
    thishull= hull
    noSampleYet= True
    while noSampleYet:
        #Sample a candidate from the upper hull
        candidate= sample_hull(thishull,domain,isDomainFinite)
        thishux, thishlx= evaluate_hull(candidate,thishull)
        u= stats.uniform.rvs()
        # Squeeze test: accept using only the cheap lower hull
        if u < numpy.exp(thishlx-thishux):
            thissample= candidate
            noSampleYet= False
        else:
            # Full test: evaluate the (expensive) log-density itself
            thishx= hx(candidate,hxparams)
            if u < numpy.exp(thishx-thishux):
                thissample= candidate
                noSampleYet= False
            # Refine the hull with the new evaluation (whether or not the
            # candidate was accepted), up to maxn updates
            if nupdates < maxn:
                thishpx= hpx(candidate,hxparams)
                thishull= update_hull(thishull,candidate,thishx,thishpx,
                                      domain,isDomainFinite)
                nupdates= nupdates+1
    return thissample, thishull, nupdates
def sample_hull(hull,domain,isDomainFinite):
    """sample_hull: Sample the upper hull
    Inverse-CDF sampling of the piecewise-exponential upper hull:
    draw u ~ U(0,1) and invert the normalized cumulative segment
    integrals hull[5] (scum) to locate the segment and the point within it.
    Input:
       hull - hull structure (see setup_hull for a definition of this)
       domain - [.,.] upper and lower limit to the domain
       isDomainFinite - [.,.] is there a lower/upper limit to the domain?
    Output:
       a sample from the hull
    History:
       2009-05-21 - Written - Bovy
    """
    u= stats.uniform.rvs()
    #Find largest zs[jj] such that scum[jj] < u
    #The first bin is a special case
    if hull[5][0] >= u:
        if hull[3][0] == 0:
            # Flat tangent in the first segment: invert a uniform density
            if isDomainFinite[0]:
                thissample= domain[0]+u/hull[5][0]*(hull[4][0]-domain[0])
            else:
                # NOTE(review): unreachable-in-valid-use case (flat tangent
                # on an infinite domain); returns a sentinel instead of
                # raising - see the module-level TO DO
                thissample= 100000000 #Throw some kind of error
        else:
            # Invert the exponential density on the first segment
            thissample= hull[4][0]+1./hull[3][0]*numpy.log(1.-hull[3][0]*hull[0]*(hull[5][0]-u)/numpy.exp(hull[6][0]))
    else:
        # Locate the segment whose cumulative weight brackets u
        if len(hull[5]) == 1:
            indx= 0
        else:
            indx= 1
            while indx < len(hull[5]) and hull[5][indx] < u:
                indx= indx+1
            indx= indx-1
        if numpy.fabs(hull[3][indx+1]) == 0:
            # Flat tangent on this segment: invert a uniform density
            if indx != (len(hull[5])-1):
                thissample= hull[4][indx]+(u-hull[5][indx])/(hull[5][indx+1]-hull[5][indx])*(hull[4][indx+1]-hull[4][indx])
            else:
                if isDomainFinite[1]:
                    thissample= hull[4][indx]+(u-hull[5][indx])/(1.-hull[5][indx])*(domain[1]-hull[4][indx])
                else:
                    # NOTE(review): same sentinel-instead-of-error situation
                    # as above - see the module-level TO DO
                    thissample= 100000 #Throw some kind of error
        else:
            # Invert the exponential density on this segment
            thissample= hull[4][indx]+1./hull[3][indx+1]*numpy.log(1.+hull[3][indx+1]*hull[0]*(u-hull[5][indx])/numpy.exp(hull[6][indx]))
    return thissample
def evaluate_hull(x,hull):
    """evaluate_hull: evaluate h_u(x) and (optional) h_l(x)
    The upper hull h_u is the tangent line of the segment containing x;
    the lower hull h_l is the chord between the two abcissae bracketing x
    (or -inf outside the range of the abcissae).
    Input:
       x - abcissa
       hull - the hull (see setup_hull for a definition)
    Output:
       hu(x) (optional), hl(x)
    History:
       2009-05-21 - Written - Bovy (NYU)
    """
    #Find in which [z_{i-1},z_i] interval x lies
    if x < hull[4][0]:
        #x lies in the first interval
        hux= hull[3][0]*(x-hull[1][0])+hull[2][0]
        indx= 0
    else:
        if len(hull[5]) == 1:
            #There are only two intervals
            indx= 1
        else:
            indx= 1
            while indx < len(hull[4]) and hull[4][indx] < x:
                indx= indx+1
            indx= indx-1
        # Tangent line at x_indx gives the upper hull on this interval
        hux= hull[3][indx]*(x-hull[1][indx])+hull[2][indx]
    #Now evaluate hlx
    neginf= numpy.finfo(numpy.dtype(numpy.float64)).min
    if x < hull[1][0] or x > hull[1][-1]:
        # Outside the abcissae the lower hull is -infinity (squeeze never
        # accepts there)
        hlx= neginf
    else:
        # Linear interpolation of h between the bracketing abcissae
        if indx == 0:
            hlx= ((hull[1][1]-x)*hull[2][0]+(x-hull[1][0])*hull[2][1])/(hull[1][1]-hull[1][0])
        elif indx == len(hull[4]):
            hlx= ((hull[1][-1]-x)*hull[2][-2]+(x-hull[1][-2])*hull[2][-1])/(hull[1][-1]-hull[1][-2])
        elif x < hull[1][indx+1]:
            hlx= ((hull[1][indx+1]-x)*hull[2][indx]+(x-hull[1][indx])*hull[2][indx+1])/(hull[1][indx+1]-hull[1][indx])
        else:
            hlx= ((hull[1][indx+2]-x)*hull[2][indx+1]+(x-hull[1][indx+1])*hull[2][indx+2])/(hull[1][indx+2]-hull[1][indx+1])
    return hux, hlx
def update_hull(hull,newx,newhx,newhpx,domain,isDomainFinite):
    """update_hull: update the hull with a new function evaluation
    Inserts the new abcissa (keeping the abcissae sorted), recomputes the
    affected tangent-line intersections z and upper-hull values hu, and
    renormalizes the cumulative segment integrals.
    Input:
       hull - the current hull (see setup_hull for a definition)
       newx - a new abcissa
       newhx - h(newx)
       newhpx - hp(newx)
       domain - [.,.] upper and lower limit to the domain
       isDomainFinite - [.,.] is there a lower/upper limit to the domain?
    Output:
       newhull
    History:
       2009-05-21 - Written - Bovy (NYU)
    """
    #BOVY: Perhaps add a check that newx is sufficiently far from any existing point
    #Find where newx fits in with the other xs
    if newx > hull[1][-1]:
        # New abcissa beyond the current right-most point: append
        newxs= numpy.append(hull[1],newx)
        newhxs= numpy.append(hull[2],newhx)
        newhpxs= numpy.append(hull[3],newhpx)
        #new z
        newz= ( newhx - hull[2][-1] - newx*newhpx + hull[1][-1]*hull[3][-1])/( hull[3][-1] - newhpx)
        newzs= numpy.append(hull[4],newz)
        #New hu
        newhu= hull[3][-1]*(newz-hull[1][-1]) + hull[2][-1]
        newhus= numpy.append(hull[6],newhu)
    else:
        # Find the insertion index that keeps the abcissae sorted
        indx= 0
        while newx > hull[1][indx]:
            indx=indx+1
        newxs= numpy.insert(hull[1],indx,newx)
        newhxs= numpy.insert(hull[2],indx,newhx)
        newhpxs= numpy.insert(hull[3],indx,newhpx)
        #Replace old z with new zs
        if newx < hull[1][0]:
            # New left-most abcissa: one new intersection with the old
            # left-most tangent
            newz= (hull[2][0]-newhx-hull[1][0]*hull[3][0]+newx*newhpx)/(newhpx-hull[3][0])
            newzs= numpy.insert(hull[4],0,newz)
            #Also add the new hu
            newhu= newhpx*(newz-newx)+newhx
            newhus= numpy.insert(hull[6],0,newhu)
        else:
            # Interior insertion: the old intersection z_{indx-1} is
            # replaced by two new ones, on either side of newx
            newz1= (newhx-hull[2][indx-1] - newx*newhpx+hull[1][indx-1]*hull[3][indx-1])/(hull[3][indx-1]-newhpx)
            newz2= (hull[2][indx]-newhx - hull[1][indx]*hull[3][indx]+newx*newhpx)/(newhpx-hull[3][indx])
            #Insert newz1 and replace z_old
            newzs= numpy.insert(hull[4],indx-1,newz1)
            newzs[indx]= newz2
            #Update the hus
            newhu1= hull[3][indx-1]*(newz1-hull[1][indx-1])+hull[2][indx-1]
            newhu2= newhpx*(newz2-newx)+newhx
            newhus= numpy.insert(hull[6],indx-1,newhu1)
            newhus[indx]= newhu2
    #Recalculate the cumulative sum
    # (same segment-integral logic as in setup_hull)
    nx= len(newxs)
    newscum= numpy.zeros(nx-1)
    if isDomainFinite[0]:
        newscum[0]= 1./newhpxs[0]*(numpy.exp(newhus[0])-numpy.exp(
                newhpxs[0]*(domain[0]-newxs[0])+newhxs[0]))
    else:
        newscum[0]= 1./newhpxs[0]*numpy.exp(newhus[0])
    if nx > 2:
        for jj in range(nx-2):
            if newhpxs[jj+1] == 0.:
                newscum[jj+1]= (newzs[jj+1]-newzs[jj])*numpy.exp(newhxs[jj+1])
            else:
                newscum[jj+1]=1./newhpxs[jj+1]*(numpy.exp(newhus[jj+1])-numpy.exp(newhus[jj]))
    if isDomainFinite[1]:
        newcu=1./newhpxs[nx-1]*(numpy.exp(newhpxs[nx-1]*(
                    domain[1]-newxs[nx-1])+newhxs[nx-1]) - numpy.exp(newhus[nx-2]))
    else:
        newcu=- 1./newhpxs[nx-1]*numpy.exp(newhus[nx-2])
    newcu= newcu+numpy.sum(newscum)
    newscum= numpy.cumsum(newscum)/newcu
    newhull=[]
    newhull.append(newcu)
    newhull.append(newxs)
    newhull.append(newhxs)
    newhull.append(newhpxs)
    newhull.append(newzs)
    newhull.append(newscum)
    newhull.append(newhus)
    return newhull
| bsd-3-clause | 8a7c2a5c67783feff22e832e81ef3d03 | 36.43617 | 137 | 0.574737 | 3.038204 | false | false | false | false |
jobovy/galpy | galpy/util/symplecticode.py | 1 | 4554 | #############################################################################
#Symplectic ODE integrators
#Follows scipy.integrate.odeint inputs as much as possible
#############################################################################
#############################################################################
#Copyright (c) 2011, Jo Bovy
#All rights reserved.
#
#Redistribution and use in source and binary forms, with or without
#modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# The name of the author may not be used to endorse or promote products
# derived from this software without specific prior written permission.
#
#THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
#"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
#LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
#A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
#HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
#INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
#BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
#OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
#AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
#LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY
#WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
#POSSIBILITY OF SUCH DAMAGE.
#############################################################################
import numpy
_MAX_DT_REDUCE= 10000.  # maximum factor by which the step size may be reduced in _leapfrog_estimate_step
def leapfrog(func,yo,t,args=(),rtol=1.49012e-12,atol=1.49012e-12):
    """
    NAME:
       leapfrog
    PURPOSE:
       leapfrog integrate an ode
    INPUT:
       func - force function of (y,*args)
       yo - initial condition [q,p]
       t - set of times at which one wants the result
       rtol, atol
    OUTPUT:
       y : array, shape (len(y0), len(t))
       Array containing the value of y for each desired time in t, \
       with the initial value y0 in the first row.
    HISTORY:
       2011-02-02 - Written - Bovy (NYU)
    """
    #Initialize
    # Split the phase-space point into positions q and momenta p
    qo= yo[0:len(yo)//2]
    po= yo[len(yo)//2:len(yo)]
    out= numpy.zeros((len(t),len(yo)))
    out[0,:]= yo
    #Estimate necessary step size
    dt= t[1]-t[0] #assumes that the steps are equally spaced
    init_dt= dt
    dt= _leapfrog_estimate_step(func,qo,po,dt,t[0],args,rtol,atol)
    # Number of internal sub-steps per requested output interval
    ndt= int(init_dt/dt)
    #Integrate
    to= t[0]
    for ii in range(1,len(t)):
        for jj in range(ndt): #loop over number of sub-intervals
            #This could be made faster by combining the drifts
            # Drift-kick-drift form of the leapfrog step
            #drift
            q12= leapfrog_leapq(qo,po,dt/2.)
            #kick
            force= func(q12,*args,t=to+dt/2)
            po= leapfrog_leapp(po,dt,force)
            #drift
            qo= leapfrog_leapq(q12,po,dt/2.)
            #Get ready for next
            to+= dt
        out[ii,0:len(yo)//2]= qo
        out[ii,len(yo)//2:len(yo)]= po
    return out
def leapfrog_leapq(q,p,dt):
    """Drift step: advance the positions q over dt at constant momentum p."""
    return q+p*dt
def leapfrog_leapp(p,dt,force):
    """Kick step: advance the momenta p over dt under the given force."""
    return p+force*dt
def _leapfrog_estimate_step(func,qo,po,dt,to,args,rtol,atol):
    # Estimate a step size meeting the (rtol,atol) tolerance by step
    # doubling: compare one full leapfrog step of size dt against two half
    # steps and halve dt until the scaled difference drops below 1
    init_dt= dt
    # Error scale per component, from the largest |q| and |p|
    qmax= numpy.amax(numpy.fabs(qo))+numpy.zeros(len(qo))
    pmax= numpy.amax(numpy.fabs(po))+numpy.zeros(len(po))
    scale= atol+rtol*numpy.array([qmax,pmax]).flatten()
    err= 2.
    dt*= 2.  # doubled here because the loop halves dt before the first test
    while err > 1. and init_dt/dt < _MAX_DT_REDUCE:
        #Do one leapfrog step with step dt and one with dt/2.
        #dt
        q12= leapfrog_leapq(qo,po,dt/2.)
        force= func(q12,*args,t=to+dt/2)
        p11= leapfrog_leapp(po,dt,force)
        q11= leapfrog_leapq(q12,p11,dt/2.)
        #dt/2.
        q12= leapfrog_leapq(qo,po,dt/4.)
        force= func(q12,*args,t=to+dt/4)
        ptmp= leapfrog_leapp(po,dt/2.,force)
        qtmp= leapfrog_leapq(q12,ptmp,dt/2.)#Take full step combining two half
        force= func(qtmp,*args,t=to+3.*dt/4)
        p12= leapfrog_leapp(ptmp,dt/2.,force)
        q12= leapfrog_leapq(qtmp,p12,dt/4.)
        #Norm
        # RMS of the scaled difference between the two estimates
        delta= numpy.array([numpy.fabs(q11-q12),numpy.fabs(p11-p12)]).flatten()
        err= numpy.sqrt(numpy.mean((delta/scale)**2.))
        dt/= 2.
    return dt
| bsd-3-clause | 6ad6b18aba06e31b59e03fe2cd0b608a | 38.947368 | 79 | 0.610233 | 3.385874 | false | false | false | false |
jobovy/galpy | galpy/potential/SolidBodyRotationWrapperPotential.py | 1 | 2266 | ###############################################################################
# SolidBodyRotationWrapperPotential.py: Wrapper to make a potential rotate
# with a fixed pattern speed, around
# the z axis
###############################################################################
from ..util import conversion
from .WrapperPotential import parentWrapperPotential
class SolidBodyRotationWrapperPotential(parentWrapperPotential):
    """Potential wrapper class that implements solid-body rotation around the z-axis. Can be used to make a bar or other perturbation rotate. The potential is rotated by replacing
    .. math::
        \\phi \\rightarrow \\phi + \\Omega \\times t + \\mathrm{pa}
    with :math:`\\Omega` the fixed pattern speed and :math:`\\mathrm{pa}` the position angle at :math:`t=0`.
    """
    def __init__(self,amp=1.,pot=None,omega=1.,pa=0.,ro=None,vo=None):
        """
        NAME:
           __init__
        PURPOSE:
           initialize a SolidBodyRotationWrapper Potential
        INPUT:
           amp - amplitude to be applied to the potential (default: 1.)
           pot - Potential instance or list thereof; this potential is made to rotate around the z axis by the wrapper
           omega= (1.) the pattern speed (can be a Quantity)
           pa= (0.) the position angle (can be a Quantity)
        OUTPUT:
           (none)
        HISTORY:
           2017-08-22 - Started - Bovy (UofT)
        """
        # Convert possible Quantity inputs to internal units
        self._omega= conversion.parse_frequency(omega,ro=self._ro,vo=self._vo)
        self._pa= conversion.parse_angle(pa)
        self.hasC= True
        self.hasC_dxdv= True
    def OmegaP(self):
        """
        NAME:
           OmegaP
        PURPOSE:
           return the pattern speed
        INPUT:
           (none)
        OUTPUT:
           pattern speed
        HISTORY:
           2016-11-02 - Written - Bovy (UofT)
        """
        return self._omega
    def _wrap(self,attribute,*args,**kwargs):
        # Evaluate the wrapped potential at the back-rotated azimuth
        # phi - Omega*t - pa (solid-body rotation of the perturbation)
        kwargs['phi']= kwargs.get('phi',0.) \
            -self._omega*kwargs.get('t',0.)-self._pa
        return self._wrap_pot_func(attribute)(self._pot,*args,**kwargs)
| bsd-3-clause | c7e3f342dcd7f7c98fbb28fc4155dbdf | 30.041096 | 179 | 0.530891 | 4.307985 | false | false | false | false |
jobovy/galpy | galpy/potential/AdiabaticContractionWrapperPotential.py | 1 | 7099 | ###############################################################################
# AdiabaticContractionWrapperPotential.py: Wrapper to adiabatically
# contract a DM halo in response
# to the growth of a baryonic
# component
###############################################################################
import numpy
from scipy import integrate
from scipy.interpolate import interp1d
from scipy.optimize import fixed_point
from ..util import conversion
from .Force import Force
from .interpSphericalPotential import interpSphericalPotential
# Note: not actually implemented as a WrapperPotential!
class AdiabaticContractionWrapperPotential(interpSphericalPotential):
    """AdiabaticContractionWrapperPotential: Wrapper to adiabatically contract a DM halo in response to the growth of a baryonic component. Use for example as::

        dm= AdiabaticContractionWrapperPotential(pot=MWPotential2014[2],baryonpot=MWPotential2014[:2])

    to contract the dark-matter halo in MWPotential2014 according to the baryon distribution within it. The basic physics of the adiabatic contraction is that a fraction `f_bar` of the mass in the original potential `pot` cools adiabatically to form a baryonic component `baryonpot`; this wrapper computes the resulting dark-matter potential using different approximations in the literature.
    """
    def __init__(self,amp=1.,pot=None,baryonpot=None,
                 method='cautun',f_bar=0.157,rmin=None,rmax=50.,
                 ro=None,vo=None):
        """
        NAME:

           __init__

        PURPOSE:

           initialize a AdiabaticContractionWrapper Potential

        INPUT:

           amp - amplitude to be applied to the potential (default: 1.)

           pot - Potential instance or list thereof representing the density that is adiabatically contracted

           baryonpot - Potential instance or list thereof representing the density of baryons whose growth causes the contraction

           method= ('cautun') Type of adiabatic-contraction formula:

                   * 'cautun' for that from Cautun et al. 2020 (`2020MNRAS.494.4291C <https://ui.adsabs.harvard.edu/abs/2020MNRAS.494.4291C>`__),

                   * 'blumenthal' for that from Blumenthal et al. 1986 (`1986ApJ...301...27B 1986ApJ...301...27B <https://ui.adsabs.harvard.edu/abs/1986ApJ...301...27B>`__)

                   * 'gnedin' for that from Gnedin et al. 2004 (`2004ApJ...616...16G <https://ui.adsabs.harvard.edu/abs/2004ApJ...616...16G>`__)

           f_bar= (0.157) universal baryon fraction; if None, calculated from pot and baryonpot assuming that at rmax the halo contains the universal baryon fraction; leave this at the default value unless you know what you are doing

           rmin= (None) minimum radius to consider (default: rmax/2500; don't set this to zero)

           rmax= (50.) maximum radius to consider (can be Quantity)

           ro, vo= standard unit-conversion parameters

        OUTPUT:

           (none)

        HISTORY:

           2021-03-21 - Started based on Marius Cautun's code - Bovy (UofT)

        """
        # NOTE(review): 'amp' is not referenced anywhere in this body;
        # presumably it is applied elsewhere -- confirm before relying on it
        # Initialize with Force just to parse (ro,vo)
        Force.__init__(self,ro=ro,vo=vo)
        rmax= conversion.parse_length(rmax,ro=self._ro)
        # Default rmin is rmax/2500; must stay > 0 for the geometric grid below
        rmin= conversion.parse_length(rmin,ro=self._ro) if not rmin is None \
            else rmax/2500.
        # Compute baryon and DM enclosed masses on radial grid
        from ..potential import mass
        rgrid= numpy.geomspace(rmin,rmax,301)
        baryon_mass= numpy.array([mass(baryonpot,r,use_physical=False)
                                  for r in rgrid])
        dm_mass= numpy.array([mass(pot,r,use_physical=False)
                              for r in rgrid])
        # Adiabatic contraction
        if f_bar is None:
            # Infer the baryon fraction from the enclosed masses at rmax
            f_bar= baryon_mass[-1]/(baryon_mass[-1]+dm_mass[-1])
        # Dispatch to the requested contraction formula; each returns the
        # contracted radial force M_DM(r)/r^2 on rgrid
        if method.lower() == 'cautun':
            new_rforce= _contraction_Cautun2020(rgrid,dm_mass,baryon_mass,
                                                f_bar)
        elif method.lower() == 'gnedin':
            new_rforce= \
                _contraction_Gnedin2004(rgrid,dm_mass,baryon_mass,
                                        pot.rvir(overdens=180.,
                                                 wrtcrit=False),
                                        f_bar)
        elif method.lower() == 'blumenthal':
            new_rforce= _contraction_Blumenthal1986(rgrid,dm_mass,
                                                    baryon_mass,f_bar)
        else: # pragma: no cover
            raise ValueError(f"Adiabatic contraction method '{method}' not recognized")
        # Add central point
        rgrid= numpy.concatenate(([0.],rgrid))
        new_rforce= numpy.concatenate(([0.],new_rforce))
        # Attractive force: negative of the force magnitude, interpolated
        new_rforce_func= lambda r: -numpy.interp(r,rgrid,new_rforce)
        # Potential at zero = int_0^inf dr rforce, and enc. mass constant
        # outside of last rgrid point
        Phi0= integrate.quad(new_rforce_func,rgrid[0],rgrid[-1])[0]\
            -new_rforce[-1]*rgrid[-1]
        # Hand the interpolated radial force off to the spherical-interp base
        interpSphericalPotential.__init__(self,
                                          rforce=new_rforce_func,
                                          rgrid=rgrid,
                                          Phi0=Phi0,
                                          ro=ro,vo=vo)
def _contraction_Cautun2020(r,M_DMO,Mbar,fbar):
# solve for the contracted enclosed DM mass
func_M_DM_contract= lambda M: M_DMO*1.023*(M_DMO/(1.-fbar)/(M+Mbar))**-0.54
M_DM= fixed_point(func_M_DM_contract,M_DMO)
return M_DM/M_DMO*M_DMO/r**2.
def _contraction_Blumenthal1986(r,M_DMO,Mbar,fbar):
# solve for the contracted radius 'rf' containing the same DM mass
# as enclosed for r
func_M_bar= interp1d(r,Mbar,bounds_error=False,
fill_value=(Mbar[0],Mbar[-1]) )
func_r_contract= lambda rf: r*(M_DMO/(1.-fbar))/(M_DMO+func_M_bar(rf))
rf= fixed_point(func_r_contract,r)
# now find how much the enclosed mass increased at r
func_M_DM= interp1d(rf,M_DMO,bounds_error=False,
fill_value=(M_DMO[0],M_DMO[-1]))
return func_M_DM(r)/r**2.
def _contraction_Gnedin2004(r,M_DMO,M_bar,Rvir,fbar):
# solve for the contracted radius 'rf' containing the same DM mass
# as enclosed for r
func_M_bar= interp1d(r,M_bar,bounds_error=False,
fill_value=(M_bar[0],M_bar[-1]))
func_M_DMO= interp1d(r,M_DMO,bounds_error=False,
fill_value=(M_DMO[0],M_DMO[-1]))
A, w= 0.85, 0.8
func_r_mean= lambda ri: A*Rvir*(ri/Rvir)**w
M_DMO_rmean= func_M_DMO(func_r_mean(r))
func_r_contract= lambda rf: r*(M_DMO_rmean/(1.-fbar))\
/(M_DMO_rmean+func_M_bar(func_r_mean(rf)))
rf= fixed_point(func_r_contract,r)
# now find how much the enclosed mass increased at r
func_M_DM = interp1d(rf,M_DMO,bounds_error=False,
fill_value=(M_DMO[0],M_DMO[-1]))
return func_M_DM(r)/r**2.
| bsd-3-clause | 9da6afa1de9a307b96cc24006920633c | 46.966216 | 387 | 0.58473 | 3.464617 | false | false | false | false |
sergiocorreia/panflute | panflute/table_elements.py | 1 | 13625 | """
Classes corresponding to Pandoc Table elements
"""
# ---------------------------
# Imports
# ---------------------------
from .utils import decode_ica, check_group, check_type, check_type_or_value, encode_dict, debug
from .containers import ListContainer
from .base import Element, Block, Inline
# ---------------------------
# Classes
# ---------------------------
class Table(Block):
    """Table, composed of a table head, one or more table bodies, and
    a table foot. You can also specify captions, short captions, column
    alignments, and column widths.

    Example:

        >>> x = [Para(Str('Something')), Para(Space, Str('else'))]
        >>> c1 = TableCell(*x)
        >>> c2 = TableCell(Header(Str('Title')))
        >>> row = TableRow(c1, c2)
        >>>
        >>> body = TableBody(row)
        >>> head = TableHead(row)
        >>> caption = Caption(Para(Str('Title')))
        >>> table = Table(body, head=head, caption=caption)

    TODO: UPDATE EXAMPLE
    TODO: OFFER A SIMPLE WAY TO BUILD A TABLE, with e.g. .alignments and .widths

    :param args: Table bodies
    :type args: :class:`TableBody`
    :param head: Table head
    :type head: :class:`TableHead`
    :param foot: Table foot
    :type foot: :class:`TableFoot`
    :param caption: The caption of the table (with optional short caption)
    :type caption: :class:`Caption`
    :param colspec: list of (alignment, colwidth) tuples; one for each column
    :type colspec: :class:`list` of (:class:`Alignment`, :class:`ColWidth`)
    :param identifier: element identifier (usually unique)
    :type identifier: :class:`str`
    :param classes: class names of the element
    :type classes: :class:`list` of :class:`str`
    :param attributes: additional attributes
    :type attributes: :class:`dict`
    :Base: :class:`Block`

    :param alignment: List of row alignments
        (either 'AlignLeft', 'AlignRight', 'AlignCenter' or 'AlignDefault').
    :type alignment: [:class:`str`]
    :param colwidth: Fractional column widths
    :type colwidth: [:class:`float` | "ColWidthDefault"]
    """
    __slots__ = ['_content', '_head', '_foot', '_caption', 'colspec',
                 'identifier', 'classes', 'attributes', 'cols']
    _children = ['head', 'content', 'foot', 'caption']

    def __init__(self, *args, head=None, foot=None, caption=None,
                 colspec=None, identifier='', classes=[], attributes={}):
        self._set_ica(identifier, classes, attributes)
        self._set_content(args, TableBody)
        self.caption = caption
        # Determine self.cols from the first body row BEFORE assigning
        # head/foot, because their setters validate column counts against it
        self._set_table_width() # also fills in colspec if it's empty
        self.head = head
        self.foot = foot
        # Colspec is a list of (alignment, width) tuples
        # If no colspec is given, default to (AlignDefault, ColWidthDefault)
        # for every column
        # TODO: add validation to colspec
        self.colspec = [(check_group(a, TABLE_ALIGNMENT),
                         check_type_or_value(w, (float, int), 'ColWidthDefault'))
                        for (a, w) in colspec] if colspec else [('AlignDefault', 'ColWidthDefault')] * self.cols
        self._validate_colspec()

    def _set_table_width(self):
        # Number of columns, inferred from the first row of the first body
        # (0 when the table has no bodies or the first body is empty)
        self.cols = 0
        if self.content and self.content[0].content:
            self.cols = count_columns_in_row(self.content[0].content[0].content) # Table -> First TableBody -> IntermediateBody -> First Row

    def _validate_cols(self, block):
        # Check that the first row of `block` (head/foot) spans the same
        # number of columns as the table; adopt its width if still unknown
        if not len(block.content):
            return
        block_cols = count_columns_in_row(block.content[0].content)
        if not self.cols:
            self.cols = block_cols
        elif self.cols != block_cols:
            msg = f'\n\nInvalid number of columns in table {block.location}.'
            msg += f'Expected {self.cols} but received {block_cols}\n'
            raise IndexError(msg)

    def _validate_colspec(self):
        # One (alignment, width) tuple is required per column
        if self.cols != len(self.colspec):
            msg = '\n\nInvalid number of colspec tuples.'
            msg += 'Expected {} but received {}\n'.format(self.cols, len(self.colspec))
            raise IndexError(msg)

    @property
    def head(self):
        return self._head

    @head.setter
    def head(self, value):
        # A falsy head (e.g. None) is replaced by an empty TableHead so the
        # attribute is always a TableHead instance
        self._head = check_type(value, TableHead) if value else TableHead()
        self._head.parent = self
        self._head.location = 'head'
        self._validate_cols(self.head)

    @property
    def foot(self):
        return self._foot

    @foot.setter
    def foot(self, value):
        # Same normalization as for head: always a TableFoot instance
        self._foot = check_type(value, TableFoot) if value else TableFoot()
        self._foot.parent = self
        self._foot.location = 'foot'
        self._validate_cols(self.foot)

    @property
    def caption(self):
        return self._caption

    @caption.setter
    def caption(self, value):
        # Caption is optional and may remain None
        self._caption = check_type_or_value(value, Caption, None)
        if self._caption is not None:
            self._caption.parent = self
            self._caption.location = 'caption'

    def _slots_to_json(self):
        # Serialize in the order Pandoc expects:
        # Attr Caption [ColSpec] TableHead [TableBody] TableFoot
        ica = self._ica_to_json()
        caption = self.caption.to_json()
        colspec = [[{'t': a}, colspec_to_json(c)] for a, c in self.colspec]
        head = self.head.to_json()
        bodies = [body._slots_to_json() for body in self.content]
        foot = self.foot.to_json()
        return [ica, caption, colspec, head, bodies, foot]
class TableHead(Block):
    """
    The head of a table, holding zero or more header rows plus optional
    attributes

    :param row: head rows
    :type row: :class:`TableRow`
    :param identifier: element identifier (usually unique)
    :type identifier: :class:`str`
    :param classes: class names of the element
    :type classes: :class:`list` of :class:`str`
    :param attributes: additional attributes
    :type attributes: :class:`dict`
    :Base: :class:`Block`
    """
    __slots__ = ['_content', 'identifier', 'classes', 'attributes']
    _children = ['content']

    def __init__(self, *args, identifier='', classes=[], attributes={}):
        self._set_content(args, TableRow)
        self._set_ica(identifier, classes, attributes)

    def to_json(self):
        ica = self._ica_to_json()
        rows = self.content.to_json()
        return [ica, rows]
class TableFoot(Block):
    """
    The foot of a table, holding zero or more footer rows plus optional
    attributes

    :param row: foot rows
    :type row: :class:`TableRow`
    :param identifier: element identifier (usually unique)
    :type identifier: :class:`str`
    :param classes: class names of the element
    :type classes: :class:`list` of :class:`str`
    :param attributes: additional attributes
    :type attributes: :class:`dict`
    :Base: :class:`Block`
    """
    __slots__ = ['_content', 'identifier', 'classes', 'attributes']
    _children = ['content']

    def __init__(self, *args, identifier='', classes=[], attributes={}):
        self._set_content(args, TableRow)
        self._set_ica(identifier, classes, attributes)

    def to_json(self):
        ica = self._ica_to_json()
        rows = self.content.to_json()
        return [ica, rows]
class TableBody(Block):
    """
    The body of a table: a list of body rows, an optional list of
    intermediate head rows, the number of row-header columns, plus
    optional attributes

    :param row: body rows
    :type row: :class:`TableRow`
    :param head: Intermediate head (list of table rows)
    :type head: :class:`list` of :class:`TableRow`
    :param row_head_columns: number of columns on the left that are considered column headers (default: 0)
    :type row_head_columns: :class:`int`
    :param identifier: element identifier (usually unique)
    :type identifier: :class:`str`
    :param classes: class names of the element
    :type classes: :class:`list` of :class:`str`
    :param attributes: additional attributes
    :type attributes: :class:`dict`
    :Base: :class:`Block`
    """
    __slots__ = ['_content', '_head', 'row_head_columns', 'identifier', 'classes', 'attributes']
    _children = ['content', 'head']

    def __init__(self, *args, head=None, row_head_columns=0,
                 identifier='', classes=[], attributes={}):
        self._set_ica(identifier, classes, attributes)
        self._set_content(args, TableRow)
        self.head = head
        self.row_head_columns = check_type(row_head_columns, int)

    @property
    def head(self):
        return self._head

    @head.setter
    def head(self, value):
        # Normalize any falsy value to an empty row list; accept either a
        # ListContainer or any iterable of rows
        rows = []
        if value:
            rows = value.list if isinstance(value, ListContainer) else list(value)
        self._head = ListContainer(*rows, oktypes=TableRow, parent=self)
        self._head.location = 'head'

    def _slots_to_json(self):
        ica = self._ica_to_json()
        head = self.head.to_json()
        body = self.content.to_json()
        return [ica, self.row_head_columns, head, body]
class TableRow(Element):
    """
    A single table row, made up of table cells

    :param args: cells
    :type args: :class:`TableCell`
    :param identifier: element identifier (usually unique)
    :type identifier: :class:`str`
    :param classes: class names of the element
    :type classes: :class:`list` of :class:`str`
    :param attributes: additional attributes
    :type attributes: :class:`dict`
    :Base: :class:`Element`
    """
    __slots__ = ['_content', 'identifier', 'classes', 'attributes']
    _children = ['content']

    def __init__(self, *args, identifier='', classes=[], attributes={}):
        self._set_content(args, TableCell)
        self._set_ica(identifier, classes, attributes)

    def to_json(self):
        ica = self._ica_to_json()
        cells = self.content.to_json()
        return [ica, cells]
class TableCell(Element):
    """
    Table Cell

    :param args: elements
    :type args: :class:`Block`
    :param alignment: cell alignment
        (either 'AlignLeft', 'AlignRight', 'AlignCenter' or 'AlignDefault').
    :type alignment: :class:`str`
    :param rowspan: number of rows occupied by a cell (height of a cell); must be a positive integer
    :type rowspan: :class:`int`
    :param colspan: number of columns occupied by a cell (width of a cell); must be a positive integer
    :type colspan: :class:`int`
    :param identifier: element identifier (usually unique)
    :type identifier: :class:`str`
    :param classes: class names of the element
    :type classes: :class:`list` of :class:`str`
    :param attributes: additional attributes
    :type attributes: :class:`dict`
    :Base: :class:`Element`
    """
    __slots__ = ['_content', 'alignment', 'rowspan', 'colspan',
                 'identifier', 'classes', 'attributes']
    _children = ['content']

    def __init__(self, *args, alignment='AlignDefault', rowspan=1, colspan=1,
                 identifier='', classes=[], attributes={}):
        self._set_ica(identifier, classes, attributes)
        self._set_content(args, Block)
        self.alignment = check_group(alignment, TABLE_ALIGNMENT)
        # Validate the span types, for consistency with how TableBody
        # validates row_head_columns (previously any type was accepted)
        self.rowspan = check_type(rowspan, int)
        self.colspan = check_type(colspan, int)
        # TypeError kept (rather than ValueError) for backward compatibility
        if self.rowspan <= 0:
            raise TypeError('Cell rowspan must be positive')
        if self.colspan <= 0:
            raise TypeError('Cell colspan must be positive')

    def to_json(self):
        return [self._ica_to_json(), {'t': self.alignment}, self.rowspan,
                self.colspan, self.content.to_json()]
class Caption(Element):
    """
    Table caption, with an optional short caption

    :param args: caption
    :type args: :class:`Block`
    :param short_caption: Short caption
    :type short_caption: :class:`list` of :class:`Inline`
    :Base: :class:`Element`
    """
    __slots__ = ['_content', '_short_caption']
    _children = ['content', 'short_caption']

    def __init__(self, *args, short_caption=None):
        self._set_content(args, Block)
        self.short_caption = short_caption

    def to_json(self):
        if self.short_caption is None:
            sc = None
        else:
            sc = self.short_caption.to_json()
        return [sc, self.content.to_json()]

    @property
    def short_caption(self):
        return self._short_caption

    @short_caption.setter
    def short_caption(self, value):
        # A falsy short caption is stored as None (no short caption)
        if not value:
            self._short_caption = None
            return
        items = value.list if isinstance(value, ListContainer) else list(value)
        self._short_caption = ListContainer(*items, oktypes=Inline, parent=self)
        self._short_caption.location = 'short_caption'
# ---------------------------
# Constants
# ---------------------------
# Valid values for the cell/column alignment field (mirrors Pandoc's Alignment type)
TABLE_ALIGNMENT = {'AlignLeft', 'AlignRight', 'AlignCenter', 'AlignDefault'}
# Valid non-numeric column width (Pandoc's ColWidthDefault); numeric widths
# are given as fractions instead
TABLE_WIDTH = {'ColWidthDefault'}
# ---------------------------
# Functions
# ---------------------------
def count_columns_in_row(row):
    """Return the total number of columns spanned by the cells of a row."""
    total = 0
    for cell in row:
        total += cell.colspan
    return total
def colspec_to_json(c):
    """Encode a column width for JSON: default width or an explicit fraction."""
    if c == 'ColWidthDefault':
        return {'t': c}
    return encode_dict('ColWidth', c)
def cell_from_json(c):
    """Build a TableCell from its JSON form [attr, alignment, rowspan, colspan, blocks]."""
    attr, alignment, rowspan, colspan, blocks = c
    return TableCell(*blocks, alignment=alignment, rowspan=rowspan,
                     colspan=colspan, **decode_ica(attr))
def row_from_json(c):
    """Build a TableRow from its JSON form [attr, cells]."""
    cells = [cell_from_json(cell) for cell in c[1]]
    return TableRow(*cells, **decode_ica(c[0]))
def body_from_json(c):
    """Build a TableBody from its JSON form [attr, row_head_columns, head rows, body rows]."""
    head_rows = [row_from_json(r) for r in c[2]]
    body_rows = [row_from_json(r) for r in c[3]]
    return TableBody(*body_rows, head=head_rows, row_head_columns=c[1],
                     **decode_ica(c[0]))
def table_from_json(c):
    """Build a Table from its JSON form:
    Attr Caption [ColSpec] TableHead [TableBody] TableFoot."""
    attr = decode_ica(c[0])
    short_caption, caption_blocks = c[1]
    caption = Caption(*caption_blocks, short_caption=short_caption)
    head_rows = [row_from_json(r) for r in c[3][1]]
    head = TableHead(*head_rows, **decode_ica(c[3][0]))
    bodies = [body_from_json(b) for b in c[4]]
    foot_rows = [row_from_json(r) for r in c[5][1]]
    foot = TableFoot(*foot_rows, **decode_ica(c[5][0]))
    return Table(*bodies, head=head, foot=foot, caption=caption,
                 colspec=c[2], **attr)
| bsd-3-clause | b2ee32d8d8e2844956e7500d9248d785 | 32.893035 | 141 | 0.606018 | 3.740049 | false | false | false | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.